diff --git a/docs/dyn/accessapproval_v1.folders.html b/docs/dyn/accessapproval_v1.folders.html
index bca1e5f5bb2..1c58dd61212 100644
--- a/docs/dyn/accessapproval_v1.folders.html
+++ b/docs/dyn/accessapproval_v1.folders.html
@@ -133,7 +133,7 @@

Method Details

"enrolledAncestor": True or False, # Output only. This field is read only (not settable via UpdateAccessAccessApprovalSettings method). If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project or Folder (this field will always be unset for the organization since organizations do not have ancestors). "enrolledServices": [ # A list of Google Cloud Services for which the given resource has Access Approval enrolled. Access requests for the resource given by name against any of these services contained here will be required to have explicit approval. If name refers to an organization, enrollment can be done for individual services. If name refers to a folder or project, enrollment can only be done on an all or nothing basis. If a cloud_product is repeated in this list, the first entry will be honored and all following entries will be discarded. A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. { # Represents the enrollment of a cloud resource into a specific service. - "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud DLP * Cloud EKM * Cloud HSM * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * ga-only * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. 
More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services "enrollmentLevel": "A String", # The enrollment level of the service. }, ], @@ -157,7 +157,7 @@

Method Details

"enrolledAncestor": True or False, # Output only. This field is read only (not settable via UpdateAccessAccessApprovalSettings method). If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project or Folder (this field will always be unset for the organization since organizations do not have ancestors). "enrolledServices": [ # A list of Google Cloud Services for which the given resource has Access Approval enrolled. Access requests for the resource given by name against any of these services contained here will be required to have explicit approval. If name refers to an organization, enrollment can be done for individual services. If name refers to a folder or project, enrollment can only be done on an all or nothing basis. If a cloud_product is repeated in this list, the first entry will be honored and all following entries will be discarded. A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. { # Represents the enrollment of a cloud resource into a specific service. - "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud DLP * Cloud EKM * Cloud HSM * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * ga-only * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. 
More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services "enrollmentLevel": "A String", # The enrollment level of the service. }, ], @@ -180,7 +180,7 @@

Method Details

"enrolledAncestor": True or False, # Output only. This field is read only (not settable via UpdateAccessAccessApprovalSettings method). If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project or Folder (this field will always be unset for the organization since organizations do not have ancestors). "enrolledServices": [ # A list of Google Cloud Services for which the given resource has Access Approval enrolled. Access requests for the resource given by name against any of these services contained here will be required to have explicit approval. If name refers to an organization, enrollment can be done for individual services. If name refers to a folder or project, enrollment can only be done on an all or nothing basis. If a cloud_product is repeated in this list, the first entry will be honored and all following entries will be discarded. A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. { # Represents the enrollment of a cloud resource into a specific service. - "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud DLP * Cloud EKM * Cloud HSM * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * ga-only * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. 
More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services "enrollmentLevel": "A String", # The enrollment level of the service. }, ], diff --git a/docs/dyn/accessapproval_v1.organizations.html b/docs/dyn/accessapproval_v1.organizations.html index 7065c0f9c63..b7122d2ab0f 100644 --- a/docs/dyn/accessapproval_v1.organizations.html +++ b/docs/dyn/accessapproval_v1.organizations.html @@ -133,7 +133,7 @@

Method Details

"enrolledAncestor": True or False, # Output only. This field is read only (not settable via UpdateAccessAccessApprovalSettings method). If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project or Folder (this field will always be unset for the organization since organizations do not have ancestors). "enrolledServices": [ # A list of Google Cloud Services for which the given resource has Access Approval enrolled. Access requests for the resource given by name against any of these services contained here will be required to have explicit approval. If name refers to an organization, enrollment can be done for individual services. If name refers to a folder or project, enrollment can only be done on an all or nothing basis. If a cloud_product is repeated in this list, the first entry will be honored and all following entries will be discarded. A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. { # Represents the enrollment of a cloud resource into a specific service. - "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud DLP * Cloud EKM * Cloud HSM * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * ga-only * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. 
More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services "enrollmentLevel": "A String", # The enrollment level of the service. }, ], @@ -157,7 +157,7 @@

Method Details

"enrolledAncestor": True or False, # Output only. This field is read only (not settable via UpdateAccessAccessApprovalSettings method). If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project or Folder (this field will always be unset for the organization since organizations do not have ancestors). "enrolledServices": [ # A list of Google Cloud Services for which the given resource has Access Approval enrolled. Access requests for the resource given by name against any of these services contained here will be required to have explicit approval. If name refers to an organization, enrollment can be done for individual services. If name refers to a folder or project, enrollment can only be done on an all or nothing basis. If a cloud_product is repeated in this list, the first entry will be honored and all following entries will be discarded. A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. { # Represents the enrollment of a cloud resource into a specific service. - "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud DLP * Cloud EKM * Cloud HSM * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * ga-only * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. 
More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services "enrollmentLevel": "A String", # The enrollment level of the service. }, ], @@ -180,7 +180,7 @@

Method Details

"enrolledAncestor": True or False, # Output only. This field is read only (not settable via UpdateAccessAccessApprovalSettings method). If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project or Folder (this field will always be unset for the organization since organizations do not have ancestors). "enrolledServices": [ # A list of Google Cloud Services for which the given resource has Access Approval enrolled. Access requests for the resource given by name against any of these services contained here will be required to have explicit approval. If name refers to an organization, enrollment can be done for individual services. If name refers to a folder or project, enrollment can only be done on an all or nothing basis. If a cloud_product is repeated in this list, the first entry will be honored and all following entries will be discarded. A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. { # Represents the enrollment of a cloud resource into a specific service. - "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud DLP * Cloud EKM * Cloud HSM * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * ga-only * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. 
More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services "enrollmentLevel": "A String", # The enrollment level of the service. }, ], diff --git a/docs/dyn/accessapproval_v1.projects.html b/docs/dyn/accessapproval_v1.projects.html index 28e640383a1..e663ea3d90d 100644 --- a/docs/dyn/accessapproval_v1.projects.html +++ b/docs/dyn/accessapproval_v1.projects.html @@ -133,7 +133,7 @@

Method Details

"enrolledAncestor": True or False, # Output only. This field is read only (not settable via UpdateAccessAccessApprovalSettings method). If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project or Folder (this field will always be unset for the organization since organizations do not have ancestors). "enrolledServices": [ # A list of Google Cloud Services for which the given resource has Access Approval enrolled. Access requests for the resource given by name against any of these services contained here will be required to have explicit approval. If name refers to an organization, enrollment can be done for individual services. If name refers to a folder or project, enrollment can only be done on an all or nothing basis. If a cloud_product is repeated in this list, the first entry will be honored and all following entries will be discarded. A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. { # Represents the enrollment of a cloud resource into a specific service. - "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud DLP * Cloud EKM * Cloud HSM * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * ga-only * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. 
More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services "enrollmentLevel": "A String", # The enrollment level of the service. }, ], @@ -157,7 +157,7 @@

Method Details

"enrolledAncestor": True or False, # Output only. This field is read only (not settable via UpdateAccessAccessApprovalSettings method). If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project or Folder (this field will always be unset for the organization since organizations do not have ancestors). "enrolledServices": [ # A list of Google Cloud Services for which the given resource has Access Approval enrolled. Access requests for the resource given by name against any of these services contained here will be required to have explicit approval. If name refers to an organization, enrollment can be done for individual services. If name refers to a folder or project, enrollment can only be done on an all or nothing basis. If a cloud_product is repeated in this list, the first entry will be honored and all following entries will be discarded. A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. { # Represents the enrollment of a cloud resource into a specific service. - "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud DLP * Cloud EKM * Cloud HSM * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * ga-only * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. 
More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services "enrollmentLevel": "A String", # The enrollment level of the service. }, ], @@ -180,7 +180,7 @@

Method Details

"enrolledAncestor": True or False, # Output only. This field is read only (not settable via UpdateAccessAccessApprovalSettings method). If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project or Folder (this field will always be unset for the organization since organizations do not have ancestors). "enrolledServices": [ # A list of Google Cloud Services for which the given resource has Access Approval enrolled. Access requests for the resource given by name against any of these services contained here will be required to have explicit approval. If name refers to an organization, enrollment can be done for individual services. If name refers to a folder or project, enrollment can only be done on an all or nothing basis. If a cloud_product is repeated in this list, the first entry will be honored and all following entries will be discarded. A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. { # Represents the enrollment of a cloud resource into a specific service. - "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud DLP * Cloud EKM * Cloud HSM * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * ga-only * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. 
More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services "enrollmentLevel": "A String", # The enrollment level of the service. }, ], diff --git a/docs/dyn/adsense_v2.accounts.reports.html b/docs/dyn/adsense_v2.accounts.reports.html index e3768025a42..1d35ae4094c 100644 --- a/docs/dyn/adsense_v2.accounts.reports.html +++ b/docs/dyn/adsense_v2.accounts.reports.html @@ -153,6 +153,8 @@
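Example (an illustrative sketch, not part of the generated reference; the project ID, field mask value, and use of Application Default Credentials are assumptions):

  from googleapiclient.discovery import build

  service = build('accessapproval', 'v1')
  # Enroll the project in Access Approval for every supported product.
  settings = service.projects().updateAccessApprovalSettings(
      name='projects/my-project/accessApprovalSettings',
      updateMask='enrolled_services',
      body={'enrolledServices': [{'cloudProduct': 'all'}]},
  ).execute()
  print(settings.get('enrolledServices'))

diff --git a/docs/dyn/adsense_v2.accounts.reports.html b/docs/dyn/adsense_v2.accounts.reports.html
index e3768025a42..1d35ae4094c 100644
--- a/docs/dyn/adsense_v2.accounts.reports.html
+++ b/docs/dyn/adsense_v2.accounts.reports.html
@@ -153,6 +153,8 @@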

Method Details

REQUESTED_AD_TYPE_CODE - Requested ad type code (e.g. "IMAGE", "RADLINK", "OTHER").
SERVED_AD_TYPE_NAME - Localized served ad type name (e.g. "Display", "Link unit", "Other").
SERVED_AD_TYPE_CODE - Served ad type code (e.g. "IMAGE", "RADLINK", "OTHER").
+ AD_FORMAT_NAME - Localized ad format name indicating the way an ad is shown to the users on your site (e.g. "In-page", "Anchor", "Vignette").
+ AD_FORMAT_CODE - Ad format code indicating the way an ad is shown to the users on your site (e.g. "ON_PAGE", "ANCHOR", "INTERSTITIAL").
CUSTOM_SEARCH_STYLE_NAME - Custom search style name.
CUSTOM_SEARCH_STYLE_ID - Custom search style id.
DOMAIN_REGISTRANT - Domain registrants.
@@ -322,6 +324,8 @@

Method Details

REQUESTED_AD_TYPE_CODE - Requested ad type code (e.g. "IMAGE", "RADLINK", "OTHER").
SERVED_AD_TYPE_NAME - Localized served ad type name (e.g. "Display", "Link unit", "Other").
SERVED_AD_TYPE_CODE - Served ad type code (e.g. "IMAGE", "RADLINK", "OTHER").
+ AD_FORMAT_NAME - Localized ad format name indicating the way an ad is shown to the users on your site (e.g. "In-page", "Anchor", "Vignette").
+ AD_FORMAT_CODE - Ad format code indicating the way an ad is shown to the users on your site (e.g. "ON_PAGE", "ANCHOR", "INTERSTITIAL").
CUSTOM_SEARCH_STYLE_NAME - Custom search style name.
CUSTOM_SEARCH_STYLE_ID - Custom search style id.
DOMAIN_REGISTRANT - Domain registrants.
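Example (a sketch of reporting on the new dimensions; the publisher account ID is hypothetical and service is assumed to be build('adsense', 'v2')):

  # Break out the last 30 days of earnings by the new ad format dimension.
  report = service.accounts().reports().generate(
      account='accounts/pub-0000000000000000',
      dateRange='LAST_30_DAYS',
      dimensions=['AD_FORMAT_NAME'],
      metrics=['ESTIMATED_EARNINGS'],
  ).execute()
  for row in report.get('rows', []):
      print([cell['value'] for cell in row['cells']])

diff --git a/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.aptArtifacts.html b/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.aptArtifacts.html
new file mode 100644
index 00000000000..431b2792697
--- /dev/null
+++ b/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.aptArtifacts.html
@@ -0,0 +1,136 @@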

Artifact Registry API . projects . locations . repositories . aptArtifacts

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ import_(parent, body=None, x__xgafv=None)

+

Imports Apt artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ import_(parent, body=None, x__xgafv=None) +
Imports Apt artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored.
+
+Args:
+  parent: string, The name of the parent resource where the artifacts will be imported. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request to import new apt artifacts.
+  "gcsSource": { # Google Cloud Storage location where the artifacts currently reside. # Google Cloud Storage location where input content is located.
+    "uris": [ # Cloud Storage paths URI (e.g., gs://my_bucket//my_object).
+      "A String",
+    ],
+    "useWildcards": True or False, # Supports URI wildcards for matching multiple objects from a single URI.
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
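+Example (an illustrative sketch, not generated output; the project, repository, and bucket names are hypothetical, and Application Default Credentials are assumed):
+
+  from googleapiclient.discovery import build
+
+  service = build('artifactregistry', 'v1beta2')
+  # Import every matching .deb object from Cloud Storage into the repository.
+  operation = service.projects().locations().repositories().aptArtifacts().import_(
+      parent='projects/my-project/locations/us-central1/repositories/my-apt-repo',
+      body={'gcsSource': {'uris': ['gs://my-bucket/packages/*.deb'], 'useWildcards': True}},
+  ).execute()
+  # import_ returns a long-running Operation; poll it until done is True.
+  print(operation.get('name'))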
+
+
\ No newline at end of file
diff --git a/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.html b/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.html
index fccc1558412..4216667b4ac 100644
--- a/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.html
+++ b/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.html
@@ -74,6 +74,11 @@

Artifact Registry API . projects . locations . repositories

Instance Methods

+

+ aptArtifacts() +

+

Returns the aptArtifacts Resource.

+

files()

@@ -84,6 +89,11 @@

Instance Methods

Returns the packages Resource.

+

+ yumArtifacts() +

+

Returns the yumArtifacts Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.yumArtifacts.html b/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.yumArtifacts.html new file mode 100644 index 00000000000..f1cf7b0f94a --- /dev/null +++ b/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.yumArtifacts.html @@ -0,0 +1,136 @@ + + + +

Artifact Registry API . projects . locations . repositories . yumArtifacts

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ import_(parent, body=None, x__xgafv=None)

+

Imports Yum (RPM) artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ import_(parent, body=None, x__xgafv=None) +
Imports Yum (RPM) artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored.
+
+Args:
+  parent: string, The name of the parent resource where the artifacts will be imported. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request to import new yum artifacts.
+  "gcsSource": { # Google Cloud Storage location where the artifacts currently reside. # Google Cloud Storage location where input content is located.
+    "uris": [ # Cloud Storage paths URI (e.g., gs://my_bucket//my_object).
+      "A String",
+    ],
+    "useWildcards": True or False, # Supports URI wildcards for matching multiple objects from a single URI.
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
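+Example (a sketch mirroring the Apt import above; the names are hypothetical and service is the same Artifact Registry client):
+
+  # Import every matching .rpm object from Cloud Storage into the repository.
+  operation = service.projects().locations().repositories().yumArtifacts().import_(
+      parent='projects/my-project/locations/us-central1/repositories/my-yum-repo',
+      body={'gcsSource': {'uris': ['gs://my-bucket/rpms/*.rpm'], 'useWildcards': True}},
+  ).execute()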
+
+
\ No newline at end of file
diff --git a/docs/dyn/baremetalsolution_v1alpha1.html b/docs/dyn/baremetalsolution_v1alpha1.html
new file mode 100644
index 00000000000..677cf89a27e
--- /dev/null
+++ b/docs/dyn/baremetalsolution_v1alpha1.html
@@ -0,0 +1,111 @@

Bare Metal Solution API

+

Instance Methods

+

+ projects() +

+

Returns the projects Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ new_batch_http_request()

+

Create a BatchHttpRequest object based on the discovery document.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ new_batch_http_request() +
Create a BatchHttpRequest object based on the discovery document.
+
+        Args:
+          callback: callable, A callback to be called for each response, of the
+            form callback(id, response, exception). The first parameter is the
+            request id, and the second is the deserialized response object. The
+            third is an apiclient.errors.HttpError exception object if an HTTP
+            error occurred while processing the request, or None if no error
+            occurred.
+
+        Returns:
+          A BatchHttpRequest object based on the discovery document.
+        
+
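+Example (a sketch; the instance name is hypothetical and service is assumed to be build('baremetalsolution', 'v1alpha1')):
+
+  def callback(request_id, response, exception):
+      # Called once per batched request; exception is an HttpError or None.
+      if exception is not None:
+          print(request_id, 'failed:', exception)
+      else:
+          print(request_id, 'succeeded:', response)
+
+  batch = service.new_batch_http_request(callback=callback)
+  batch.add(service.projects().locations().instances().get(
+      name='projects/my-project/locations/us-west1/instances/server-1'))
+  batch.execute()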
+
+
\ No newline at end of file
diff --git a/docs/dyn/baremetalsolution_v1alpha1.projects.html b/docs/dyn/baremetalsolution_v1alpha1.projects.html
new file mode 100644
index 00000000000..ef903cd2dae
--- /dev/null
+++ b/docs/dyn/baremetalsolution_v1alpha1.projects.html
@@ -0,0 +1,106 @@

Bare Metal Solution API . projects

+

Instance Methods

+

+ locations() +

+

Returns the locations Resource.

+ +

+ provisioningQuotas() +

+

Returns the provisioningQuotas Resource.

+ +

+ snapshotSchedulePolicies() +

+

Returns the snapshotSchedulePolicies Resource.

+ +

+ sshKeys() +

+

Returns the sshKeys Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+
+
\ No newline at end of file
diff --git a/docs/dyn/baremetalsolution_v1alpha1.projects.locations.html b/docs/dyn/baremetalsolution_v1alpha1.projects.locations.html
new file mode 100644
index 00000000000..4fea5ec5527
--- /dev/null
+++ b/docs/dyn/baremetalsolution_v1alpha1.projects.locations.html
@@ -0,0 +1,259 @@

Bare Metal Solution API . projects . locations

+

Instance Methods

+

+ instances() +

+

Returns the instances Resource.

+ +

+ luns() +

+

Returns the luns Resource.

+ +

+ volumes() +

+

Returns the volumes Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ submitProvisioningConfig(project, location, body=None, x__xgafv=None)

+

Submit a provisioning configuration for a given project.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ submitProvisioningConfig(project, location, body=None, x__xgafv=None) +
Submit a provisioning configuration for a given project.
+
+Args:
+  project: string, Required. The target project of the provisioning request. (required)
+  location: string, Required. The target location of the provisioning request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request for SubmitProvisioningConfig.
+  "provisioningConfig": { # An provisioning configuration. # Required. The ProvisioningConfig to submit.
+    "instances": [ # Instances to be created.
+      { # Configuration parameters for a new instance.
+        "clientNetwork": { # A network. # Client network address.
+          "address": "A String", # IP address to be assigned to the server.
+          "networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request.
+        },
+        "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled.
+        "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request.
+        "instanceType": "A String", # Instance type.
+        "location": "A String", # Location where to deploy the instance.
+        "osImage": "A String", # OS image to initialize the instance.
+        "privateNetwork": { # A network. # Private network address, if any.
+          "address": "A String", # IP address to be assigned to the server.
+          "networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request.
+        },
+      },
+    ],
+    "networks": [ # Networks to be created.
+      { # Configuration parameters for a new network.
+        "bandwidth": "A String", # Interconnect bandwidth. Set only when type is CLIENT.
+        "cidr": "A String", # CIDR range of the network.
+        "id": "A String", # A transient unique identifier to identify a volume within an ProvisioningConfig request.
+        "location": "A String", # Location where to deploy the network.
+        "serviceCidr": "A String", # Service CIDR, if any.
+        "type": "A String", # The type of this network.
+        "vlanAttachments": [ # List of VLAN attachments. As of now there are always 2 attachments, but it is going to change in the future (multi vlan).
+          { # A GCP vlan attachment.
+            "id": "A String", # Identifier of the VLAN attachment.
+            "pairingKey": "A String", # Attachment pairing key.
+          },
+        ],
+      },
+    ],
+    "ticketId": "A String", # A reference to track the request.
+    "volumes": [ # Volumes to be created.
+      { # Configuration parameters for a new volume.
+        "id": "A String", # A transient unique identifier to identify a volume within an ProvisioningConfig request.
+        "location": "A String", # Location where to deploy the volume.
+        "lunRanges": [ # LUN ranges to be configured. Set only when protocol is PROTOCOL_FC.
+          { # A LUN range.
+            "quantity": 42, # Number of LUNs to create.
+            "sizeGb": 42, # The requested size of each LUN, in GB.
+          },
+        ],
+        "machineIds": [ # Machine ids connected to this volume. Set only when protocol is PROTOCOL_FC.
+          "A String",
+        ],
+        "nfsExports": [ # NFS exports. Set only when protocol is PROTOCOL_NFS.
+          { # An NFS export entry.
+            "allowDev": True or False, # Allow dev.
+            "allowSuid": True or False, # Allow the setuid flag.
+            "cidr": "A String", # A CIDR range.
+            "machineId": "A String", # A single machine, identified by an ID.
+            "networkId": "A String", # Network to use to publish the export.
+            "noRootSquash": True or False, # Disable root squashing.
+            "permissions": "A String", # Export permissions.
+          },
+        ],
+        "protocol": "A String", # Volume protocol.
+        "sizeGb": 42, # The requested size of this volume, in GB. This will be updated in a later iteration with a generic size field.
+        "snapshotsEnabled": True or False, # Whether snapshots should be enabled.
+        "type": "A String", # The type of this Volume.
+      },
+    ],
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A provisioning configuration.
+  "instances": [ # Instances to be created.
+    { # Configuration parameters for a new instance.
+      "clientNetwork": { # A network. # Client network address.
+        "address": "A String", # IP address to be assigned to the server.
+        "networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request.
+      },
+      "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled.
+      "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request.
+      "instanceType": "A String", # Instance type.
+      "location": "A String", # Location where to deploy the instance.
+      "osImage": "A String", # OS image to initialize the instance.
+      "privateNetwork": { # A network. # Private network address, if any.
+        "address": "A String", # IP address to be assigned to the server.
+        "networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request.
+      },
+    },
+  ],
+  "networks": [ # Networks to be created.
+    { # Configuration parameters for a new network.
+      "bandwidth": "A String", # Interconnect bandwidth. Set only when type is CLIENT.
+      "cidr": "A String", # CIDR range of the network.
+      "id": "A String", # A transient unique identifier to identify a volume within an ProvisioningConfig request.
+      "location": "A String", # Location where to deploy the network.
+      "serviceCidr": "A String", # Service CIDR, if any.
+      "type": "A String", # The type of this network.
+      "vlanAttachments": [ # List of VLAN attachments. As of now there are always 2 attachments, but it is going to change in the future (multi vlan).
+        { # A GCP vlan attachment.
+          "id": "A String", # Identifier of the VLAN attachment.
+          "pairingKey": "A String", # Attachment pairing key.
+        },
+      ],
+    },
+  ],
+  "ticketId": "A String", # A reference to track the request.
+  "volumes": [ # Volumes to be created.
+    { # Configuration parameters for a new volume.
+      "id": "A String", # A transient unique identifier to identify a volume within an ProvisioningConfig request.
+      "location": "A String", # Location where to deploy the volume.
+      "lunRanges": [ # LUN ranges to be configured. Set only when protocol is PROTOCOL_FC.
+        { # A LUN range.
+          "quantity": 42, # Number of LUNs to create.
+          "sizeGb": 42, # The requested size of each LUN, in GB.
+        },
+      ],
+      "machineIds": [ # Machine ids connected to this volume. Set only when protocol is PROTOCOL_FC.
+        "A String",
+      ],
+      "nfsExports": [ # NFS exports. Set only when protocol is PROTOCOL_NFS.
+        { # An NFS export entry.
+          "allowDev": True or False, # Allow dev.
+          "allowSuid": True or False, # Allow the setuid flag.
+          "cidr": "A String", # A CIDR range.
+          "machineId": "A String", # A single machine, identified by an ID.
+          "networkId": "A String", # Network to use to publish the export.
+          "noRootSquash": True or False, # Disable root squashing.
+          "permissions": "A String", # Export permissions.
+        },
+      ],
+      "protocol": "A String", # Volume protocol.
+      "sizeGb": 42, # The requested size of this volume, in GB. This will be updated in a later iteration with a generic size field.
+      "snapshotsEnabled": True or False, # Whether snapshots should be enabled.
+      "type": "A String", # The type of this Volume.
+    },
+  ],
+}
+
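+Example (a sketch only; every identifier and parameter value below is an assumption, and the body follows the request shape documented above):
+
+  request_body = {
+      'provisioningConfig': {
+          'ticketId': 'TICKET-123',  # hypothetical tracking reference
+          'volumes': [{
+              'id': 'vol-1',  # transient id, meaningful only within this request
+              'location': 'us-central1',
+              'protocol': 'PROTOCOL_NFS',
+              'sizeGb': 1024,
+              'snapshotsEnabled': True,
+          }],
+      },
+  }
+  config = service.projects().locations().submitProvisioningConfig(
+      project='my-project', location='us-central1', body=request_body).execute()
+  print(config.get('ticketId'))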
+
+
\ No newline at end of file
diff --git a/docs/dyn/baremetalsolution_v1alpha1.projects.locations.instances.html b/docs/dyn/baremetalsolution_v1alpha1.projects.locations.instances.html
new file mode 100644
index 00000000000..5ba81d62ddc
--- /dev/null
+++ b/docs/dyn/baremetalsolution_v1alpha1.projects.locations.instances.html
@@ -0,0 +1,312 @@

Bare Metal Solution API . projects . locations . instances

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ disableInteractiveSerialConsole(instance, body=None, x__xgafv=None)

+

Disable the interactive serial console feature on a specific machine.

+

+ enableInteractiveSerialConsole(instance, body=None, x__xgafv=None)

+

Enable the interactive serial console feature on a specific machine.

+

+ get(name, x__xgafv=None)

+

Get details for a specific named Instance.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List Instances (physical servers).

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

+ readSerialPortOutput(instance, startByte=None, x__xgafv=None)

+

Read the most recent serial port output from a machine.

+

+ resetInstance(instance, body=None, x__xgafv=None)

+

Perform an ungraceful, hard reset on a machine (equivalent to physically turning power off and then back on).

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ disableInteractiveSerialConsole(instance, body=None, x__xgafv=None) +
Disable the interactive serial console feature on a specific machine.
+
+Args:
+  instance: string, Required. Name of the instance to disable the interactive serial console feature on. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request for DisableInteractiveSerialConsole.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for DisableInteractiveSerialConsole.
+}
+
+ +
+ enableInteractiveSerialConsole(instance, body=None, x__xgafv=None) +
Enable the interactive serial console feature on a specific machine.
+
+Args:
+  instance: string, Required. Name of the instance to enable the interactive serial console feature on. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request for EnableInteractiveSerialConsole.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for EnableInteractiveSerialConsole.
+}
+
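+
+A minimal usage sketch for the two serial-console calls above, assuming Application Default Credentials and a hypothetical instance name (the projects/*/locations/*/instances/* format is inferred from the parameters documented here):
+
+    from googleapiclient.discovery import build
+
+    service = build('baremetalsolution', 'v1alpha1')
+    instances = service.projects().locations().instances()
+    name = 'projects/my-project/locations/us-central1/instances/my-server'  # hypothetical
+
+    # Both request bodies are empty messages, per the schemas above.
+    instances.enableInteractiveSerialConsole(instance=name, body={}).execute()
+    instances.disableInteractiveSerialConsole(instance=name, body={}).execute()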
+ +
+ get(name, x__xgafv=None) +
Get details for a specific named Instance.
+
+Args:
+  name: string, Required. The name of the Instance to retrieve. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Instance.
+  "hyperthreadingEnabled": True or False, # Is hyperthreading enabled for this instance?
+  "luns": [ # The Luns attached to this instance
+    { # A storage Lun.
+      "isBoot": True or False, # Whether this Lun is a boot Lun.
+      "multiprotocolType": "A String", # The multiprotocol type of this Lun.
+      "name": "A String", # Output only. The name of this Lun.
+      "remoteVolume": { # Volume registered in the project. # The storage volume that this Lun is attached to.
+        "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy.
+        "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink.
+        "name": "A String", # Output only. The name of this Volume.
+        "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots.
+        "requestedSizeGb": "A String", # The requested size of this Volume, in GB.
+        "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots.
+        "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space.
+        "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume.
+        "state": "A String", # The state of this Volume.
+        "type": "A String", # The type of this Volume.
+      },
+      "shareable": True or False, # Whether this Lun is allowed to be shared between multiple physical servers.
+      "sizeGb": "A String", # The size of this Lun, in gigabytes.
+      "state": "A String", # The state of this Lun.
+    },
+  ],
+  "name": "A String", # Output only. The name of this Instance.
+  "scheduledPowerResetTime": "A String", # The scheduled power reset time.
+  "sshEnabled": True or False, # Is SSH enabled for this instance?
+  "state": "A String", # The state of this Instance.
+}
+
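+
+For example, a hedged sketch of fetching one Instance and inspecting a few of the documented fields (the resource name is hypothetical):
+
+    from googleapiclient.discovery import build
+
+    service = build('baremetalsolution', 'v1alpha1')
+    instance = service.projects().locations().instances().get(
+        name='projects/my-project/locations/us-central1/instances/my-server',  # hypothetical
+    ).execute()
+    print(instance['name'], instance['state'], instance.get('sshEnabled'))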
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List Instances (physical servers).
+
+Args:
+  parent: string, Required. The location to list Instances in. (required)
+  pageSize: integer, The maximum number of items to return.
+  pageToken: string, The next_page_token value returned from a previous List request, if any.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ListInstances.
+  "instances": [ # The Instances in this project.
+    { # An Instance.
+      "hyperthreadingEnabled": True or False, # Is hyperthreading enabled for this instance?
+      "luns": [ # The Luns attached to this instance
+        { # A storage Lun.
+          "isBoot": True or False, # Whether this Lun is a boot Lun.
+          "multiprotocolType": "A String", # The multiprotocol type of this Lun.
+          "name": "A String", # Output only. The name of this Lun.
+          "remoteVolume": { # Volume registered in the project. # The storage volume that this Lun is attached to.
+            "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy.
+            "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink.
+            "name": "A String", # Output only. The name of this Volume.
+            "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots.
+            "requestedSizeGb": "A String", # The requested size of this Volume, in GB.
+            "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots.
+            "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space.
+            "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume.
+            "state": "A String", # The state of this Volume.
+            "type": "A String", # The type of this Volume.
+          },
+          "shareable": True or False, # Whether this Lun is allowed to be shared between multiple physical servers.
+          "sizeGb": "A String", # The size of this Lun, in gigabytes.
+          "state": "A String", # The state of this Lun.
+        },
+      ],
+      "name": "A String", # Output only. The name of this Instance.
+      "scheduledPowerResetTime": "A String", # The scheduled power reset time.
+      "sshEnabled": True or False, # Is SSH enabled for this instance?
+      "state": "A String", # The state of this Instance.
+    },
+  ],
+  "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list.
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
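+
+The `list`/`list_next` pair supports the usual pagination idiom of the generated Python client; a sketch with a hypothetical parent:
+
+    from googleapiclient.discovery import build
+
+    service = build('baremetalsolution', 'v1alpha1')
+    instances = service.projects().locations().instances()
+    request = instances.list(parent='projects/my-project/locations/us-central1', pageSize=50)
+    while request is not None:
+        response = request.execute()
+        for inst in response.get('instances', []):
+            print(inst['name'], inst['state'])
+        request = instances.list_next(request, response)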
+ +
+ readSerialPortOutput(instance, startByte=None, x__xgafv=None) +
Read the most recent serial port output from a machine.
+
+Args:
+  instance: string, Required. Name of the instance to get serial port output of. (required)
+  startByte: string, Optional. The start byte of the serial port output to return.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ReadSerialPortOutput.
+  "contents": "A String", # The serial port output.
+  "nextStartByte": "A String", # The byte index to use in a subsequent call to ReadSerialPortOutput to get more output.
+  "start": "A String", # The start byte index of the included contents.
+}
+
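+
+Because the response carries `nextStartByte`, output can be tailed incrementally; a sketch (instance name hypothetical, and note that `startByte` is documented as a string):
+
+    from googleapiclient.discovery import build
+
+    service = build('baremetalsolution', 'v1alpha1')
+    instances = service.projects().locations().instances()
+    name = 'projects/my-project/locations/us-central1/instances/my-server'  # hypothetical
+    start = '0'
+    for _ in range(3):  # poll a few times; a real tail would loop indefinitely
+        out = instances.readSerialPortOutput(instance=name, startByte=start).execute()
+        print(out.get('contents', ''), end='')
+        start = out['nextStartByte']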
+ +
+ resetInstance(instance, body=None, x__xgafv=None) +
Perform an ungraceful, hard reset on a machine (equivalent to physically turning power off and then back on).
+
+Args:
+  instance: string, Required. Name of the instance to reset. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request for ResetInstance.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ResetInstance.
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/baremetalsolution_v1alpha1.projects.locations.luns.html b/docs/dyn/baremetalsolution_v1alpha1.projects.locations.luns.html new file mode 100644 index 00000000000..6cf158c8836 --- /dev/null +++ b/docs/dyn/baremetalsolution_v1alpha1.projects.locations.luns.html @@ -0,0 +1,188 @@ + + + +

Bare Metal Solution API . projects . locations . luns

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Get details for a specific named Lun.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List Luns.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Get details for a specific named Lun.
+
+Args:
+  name: string, Required. The name of the Lun to retrieve. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A storage Lun.
+  "isBoot": True or False, # Whether this Lun is a boot Lun.
+  "multiprotocolType": "A String", # The multiprotocol type of this Lun.
+  "name": "A String", # Output only. The name of this Lun.
+  "remoteVolume": { # Volume registered in the project. # The storage volume that this Lun is attached to.
+    "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy.
+    "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink.
+    "name": "A String", # Output only. The name of this Volume.
+    "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots.
+    "requestedSizeGb": "A String", # The requested size of this Volume, in GB.
+    "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots.
+    "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space.
+    "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume.
+    "state": "A String", # The state of this Volume.
+    "type": "A String", # The type of this Volume.
+  },
+  "shareable": True or False, # Whether this Lun is allowed to be shared between multiple physical servers.
+  "sizeGb": "A String", # The size of this Lun, in gigabytes.
+  "state": "A String", # The state of this Lun.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List Luns.
+
+Args:
+  parent: string, Required. The location to list Luns in. (required)
+  pageSize: integer, The maximum number of items to return.
+  pageToken: string, The next_page_token value returned from a previous List request, if any.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ListLuns.
+  "luns": [ # The Luns in this project.
+    { # A storage Lun.
+      "isBoot": True or False, # Whether this Lun is a boot Lun.
+      "multiprotocolType": "A String", # The multiprotocol type of this Lun.
+      "name": "A String", # Output only. The name of this Lun.
+      "remoteVolume": { # Volume registered in the project. # The storage volume that this Lun is attached to.
+        "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy.
+        "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink.
+        "name": "A String", # Output only. The name of this Volume.
+        "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots.
+        "requestedSizeGb": "A String", # The requested size of this Volume, in GB.
+        "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots.
+        "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space.
+        "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume.
+        "state": "A String", # The state of this Volume.
+        "type": "A String", # The type of this Volume.
+      },
+      "shareable": True or False, # Whether this Lun is allowed to be shared between multiple physical servers.
+      "sizeGb": "A String", # The size of this Lun, in gigabytes.
+      "state": "A String", # The state of this Lun.
+    },
+  ],
+  "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list.
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
+ + \ No newline at end of file diff --git a/docs/dyn/baremetalsolution_v1alpha1.projects.locations.volumes.html b/docs/dyn/baremetalsolution_v1alpha1.projects.locations.volumes.html new file mode 100644 index 00000000000..b5ad3798e84 --- /dev/null +++ b/docs/dyn/baremetalsolution_v1alpha1.projects.locations.volumes.html @@ -0,0 +1,253 @@ + + + +

Bare Metal Solution API . projects . locations . volumes

+

Instance Methods

+

+ snapshots() +

+

Returns the snapshots Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Get details for a specific named Volume.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List the volumes for the specified project.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Update certain parameters on a Volume.

+

+ setVolumeSnapshotSchedulePolicy(volume, body=None, x__xgafv=None)

+

Sets the specified snapshot schedule policy on the specified volume.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Get details for a specific named Volume.
+
+Args:
+  name: string, Required. The name of the Volume to retrieve. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Volume registered in the project.
+  "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy.
+  "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink.
+  "name": "A String", # Output only. The name of this Volume.
+  "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots.
+  "requestedSizeGb": "A String", # The requested size of this Volume, in GB.
+  "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots.
+  "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space.
+  "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume.
+  "state": "A String", # The state of this Volume.
+  "type": "A String", # The type of this Volume.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List the volumes for the specified project.
+
+Args:
+  parent: string, Required. The location to list Volumes in. (required)
+  pageSize: integer, The maximum number of items to return.
+  pageToken: string, The next_page_token value returned from a previous List request, if any.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ListVolumes.
+  "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list.
+  "volumes": [ # The volumes registered in this project.
+    { # Volume registered in the project.
+      "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy.
+      "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink.
+      "name": "A String", # Output only. The name of this Volume.
+      "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots.
+      "requestedSizeGb": "A String", # The requested size of this Volume, in GB.
+      "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots.
+      "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space.
+      "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume.
+      "state": "A String", # The state of this Volume.
+      "type": "A String", # The type of this Volume.
+    },
+  ],
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
+ +
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Update certain parameters on a Volume.
+
+Args:
+  name: string, Output only. The name of this Volume. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Volume registered in the project.
+  "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy.
+  "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink.
+  "name": "A String", # Output only. The name of this Volume.
+  "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots.
+  "requestedSizeGb": "A String", # The requested size of this Volume, in GB.
+  "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots.
+  "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space.
+  "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume.
+  "state": "A String", # The state of this Volume.
+  "type": "A String", # The type of this Volume.
+}
+
+  updateMask: string, The list of fields to update. The only currently supported field is `snapshot_reserved_space_percent`.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Volume registered in the project.
+  "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy.
+  "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink.
+  "name": "A String", # Output only. The name of this Volume.
+  "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots.
+  "requestedSizeGb": "A String", # The requested size of this Volume, in GB.
+  "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots.
+  "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space.
+  "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume.
+  "state": "A String", # The state of this Volume.
+  "type": "A String", # The type of this Volume.
+}
+
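+
+Since the docstring above notes that `snapshot_reserved_space_percent` is the only mutable field, a patch sketch looks like this (volume name hypothetical):
+
+    from googleapiclient.discovery import build
+
+    service = build('baremetalsolution', 'v1alpha1')
+    updated = service.projects().locations().volumes().patch(
+        name='projects/my-project/locations/us-central1/volumes/my-volume',  # hypothetical
+        updateMask='snapshot_reserved_space_percent',
+        body={'snapshotReservedSpacePercent': 25},
+    ).execute()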
+ +
+ setVolumeSnapshotSchedulePolicy(volume, body=None, x__xgafv=None) +
Sets the specified snapshot schedule policy on the specified volume.
+
+Args:
+  volume: string, Required. Name of the volume to set snapshot schedule policy on. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request for SetVolumeSnapshotSchedulePolicy.
+  "snapshotSchedulePolicy": "A String", # Required. The name of the policy to set on the volume.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for SetVolumeSnapshotSchedulePolicy.
+}
+
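+
+A sketch of attaching a policy to a volume; both resource names are hypothetical (the policy name format is inferred from the `projects.snapshotSchedulePolicies` collection documented below):
+
+    from googleapiclient.discovery import build
+
+    service = build('baremetalsolution', 'v1alpha1')
+    service.projects().locations().volumes().setVolumeSnapshotSchedulePolicy(
+        volume='projects/my-project/locations/us-central1/volumes/my-volume',  # hypothetical
+        body={'snapshotSchedulePolicy': 'projects/my-project/snapshotSchedulePolicies/daily'},
+    ).execute()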
+ + \ No newline at end of file diff --git a/docs/dyn/baremetalsolution_v1alpha1.projects.locations.volumes.snapshots.html b/docs/dyn/baremetalsolution_v1alpha1.projects.locations.volumes.snapshots.html new file mode 100644 index 00000000000..ff3a540b1e5 --- /dev/null +++ b/docs/dyn/baremetalsolution_v1alpha1.projects.locations.volumes.snapshots.html @@ -0,0 +1,243 @@ + + + +

Bare Metal Solution API . projects . locations . volumes . snapshots

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

Create a snapshot of the specified Volume.

+

+ delete(name, x__xgafv=None)

+

Delete a specific named snapshot.

+

+ get(name, x__xgafv=None)

+

Get details for a specific named snapshot.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List the Snapshots for the specified Volume.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

+ restore(name, body=None, x__xgafv=None)

+

Restore a VolumeSnapshot.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, x__xgafv=None) +
Create a snapshot of the specified Volume.
+
+Args:
+  parent: string, Required. The Volume containing the VolumeSnapshots. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # VolumeSnapshot registered for a given Volume.
+  "description": "A String", # The description of this Snapshot.
+  "name": "A String", # Output only. The name of this Snapshot.
+  "sizeBytes": "A String", # The real size of this Snapshot, in bytes.
+  "state": "A String", # The state of this Snapshot.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # VolumeSnapshot registered for a given Volume.
+  "description": "A String", # The description of this Snapshot.
+  "name": "A String", # Output only. The name of this Snapshot.
+  "sizeBytes": "A String", # The real size of this Snapshot, in bytes.
+  "state": "A String", # The state of this Snapshot.
+}
+
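+
+A sketch of creating a snapshot under a volume; the parent name is hypothetical, and only `description` is settable here since `name` is output only:
+
+    from googleapiclient.discovery import build
+
+    service = build('baremetalsolution', 'v1alpha1')
+    snapshot = service.projects().locations().volumes().snapshots().create(
+        parent='projects/my-project/locations/us-central1/volumes/my-volume',  # hypothetical
+        body={'description': 'pre-upgrade checkpoint'},
+    ).execute()
+    print(snapshot['name'], snapshot['state'])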
+ +
+ delete(name, x__xgafv=None) +
Delete a specific named snapshot.
+
+Args:
+  name: string, Required. The name of the snapshot to delete. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.
+}
+
+ +
+ get(name, x__xgafv=None) +
Get details for a specific named snapshot.
+
+Args:
+  name: string, Required. The name of the snapshot to retrieve. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # VolumeSnapshot registered for a given Volume.
+  "description": "A String", # The description of this Snapshot.
+  "name": "A String", # Output only. The name of this Snapshot.
+  "sizeBytes": "A String", # The real size of this Snapshot, in bytes.
+  "state": "A String", # The state of this Snapshot.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List the Snapshots for the specified Volume.
+
+Args:
+  parent: string, Required. The Volume containing the VolumeSnapshots. (required)
+  pageSize: integer, The maximum number of items to return.
+  pageToken: string, The next_page_token value returned from a previous List request, if any.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ListVolumeSnapshots.
+  "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list.
+  "volumeSnapshots": [ # The VolumeSnapshots for the volume.
+    { # VolumeSnapshot registered for a given Volume.
+      "description": "A String", # The description of this Snapshot.
+      "name": "A String", # Output only. The name of this Snapshot.
+      "sizeBytes": "A String", # The real size of this Snapshot, in bytes.
+      "state": "A String", # The state of this Snapshot.
+    },
+  ],
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
+ +
+ restore(name, body=None, x__xgafv=None) +
Restore a VolumeSnapshot.
+
+Args:
+  name: string, Required. Name of the VolumeSnapshot to restore. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request for RestoreVolumeSnapshot.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/baremetalsolution_v1alpha1.projects.provisioningQuotas.html b/docs/dyn/baremetalsolution_v1alpha1.projects.provisioningQuotas.html new file mode 100644 index 00000000000..63fa07b7fa8 --- /dev/null +++ b/docs/dyn/baremetalsolution_v1alpha1.projects.provisioningQuotas.html @@ -0,0 +1,136 @@ + + + +

Bare Metal Solution API . projects . provisioningQuotas

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List the budget details to provision resources on a given project.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List the budget details to provision resources on a given project.
+
+Args:
+  parent: string, Required. The parent project containing the provisioning quotas. (required)
+  pageSize: integer, The maximum number of items to return.
+  pageToken: string, The next_page_token value returned from a previous List request, if any.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ListProvisioningQuotas.
+  "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list.
+  "provisioningQuotas": [ # The provisioning quotas registered in this project.
+    { # A provisioning quota for a given project.
+      "instanceQuota": { # A resource budget. # Instance quota.
+        "availableMachineCount": 42, # Number of machines than can be created for the given location and instance_type.
+        "instanceType": "A String", # Instance type.
+        "location": "A String", # Location where the quota applies.
+      },
+    },
+  ],
+}
+
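+
+A sketch of reading the per-location instance budgets (parent project hypothetical):
+
+    from googleapiclient.discovery import build
+
+    service = build('baremetalsolution', 'v1alpha1')
+    resp = service.projects().provisioningQuotas().list(parent='projects/my-project').execute()
+    for quota in resp.get('provisioningQuotas', []):
+        iq = quota['instanceQuota']
+        print(iq['location'], iq['instanceType'], iq['availableMachineCount'])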
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
+ + \ No newline at end of file diff --git a/docs/dyn/baremetalsolution_v1alpha1.projects.snapshotSchedulePolicies.html b/docs/dyn/baremetalsolution_v1alpha1.projects.snapshotSchedulePolicies.html new file mode 100644 index 00000000000..dfab21c9525 --- /dev/null +++ b/docs/dyn/baremetalsolution_v1alpha1.projects.snapshotSchedulePolicies.html @@ -0,0 +1,300 @@ + + + +

Bare Metal Solution API . projects . snapshotSchedulePolicies

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

Create a SnapshotSchedulePolicy.

+

+ delete(name, x__xgafv=None)

+

Delete the named snapshot schedule policy.

+

+ get(name, x__xgafv=None)

+

Get details for a specific snapshot schedule policy.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List the snapshot schedule policies for the specified project.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Update a SnapshotSchedulePolicy.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, x__xgafv=None) +
Create a SnapshotSchedulePolicy.
+
+Args:
+  parent: string, Required. The parent project containing the SnapshotSchedulePolicy. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A snapshot schedule policy.
+  "description": "A String", # The description of this SnapshotSchedulePolicy.
+  "name": "A String", # Output only. The name of this SnapshotSchedulePolicy.
+  "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified.
+    { # A snapshot schedule.
+      "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots.
+      "prefix": "A String", # A string to prefix names of snapshots created under this Schedule.
+      "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule.
+    },
+  ],
+  "volumes": [ # The names of the Volumes this policy is associated with.
+    "A String",
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A snapshot schedule policy.
+  "description": "A String", # The description of this SnapshotSchedulePolicy.
+  "name": "A String", # Output only. The name of this SnapshotSchedulePolicy.
+  "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified.
+    { # A snapshot schedule.
+      "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots.
+      "prefix": "A String", # A string to prefix names of snapshots created under this Schedule.
+      "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule.
+    },
+  ],
+  "volumes": [ # The names of the Volumes this policy is associated with.
+    "A String",
+  ],
+}
+
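+
+A sketch of creating a policy with one daily schedule (parent name and values hypothetical; at most 5 schedules are allowed per the schema above):
+
+    from googleapiclient.discovery import build
+
+    service = build('baremetalsolution', 'v1alpha1')
+    policy = service.projects().snapshotSchedulePolicies().create(
+        parent='projects/my-project',  # hypothetical
+        body={
+            'description': 'Daily snapshots, keep the last seven',
+            'schedules': [{
+                'crontabSpec': '0 2 * * *',  # 02:00 every day
+                'prefix': 'daily',
+                'retentionCount': 7,
+            }],
+        },
+    ).execute()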
+ +
+ delete(name, x__xgafv=None) +
Delete the named snapshot schedule policy.
+
+Args:
+  name: string, Required. The name of the snapshot schedule policy to delete. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.
+}
+
+ +
+ get(name, x__xgafv=None) +
Get details for a specific snapshot schedule policy.
+
+Args:
+  name: string, Required. The name of the policy to retrieve. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A snapshot schedule policy.
+  "description": "A String", # The description of this SnapshotSchedulePolicy.
+  "name": "A String", # Output only. The name of this SnapshotSchedulePolicy.
+  "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified.
+    { # A snapshot schedule.
+      "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots.
+      "prefix": "A String", # A string to prefix names of snapshots created under this Schedule.
+      "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule.
+    },
+  ],
+  "volumes": [ # The names of the Volumes this policy is associated with.
+    "A String",
+  ],
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List the snapshot schedule policies for the specified project.
+
+Args:
+  parent: string, Required. The parent project containing the Snapshot Schedule Policies. (required)
+  pageSize: integer, The maximum number of items to return.
+  pageToken: string, The next_page_token value returned from a previous List request, if any.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ListSnapshotSchedulePolicies.
+  "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list.
+  "snapshotSchedulePolicies": [ # The snapshot schedule policies registered in this project.
+    { # A snapshot schedule policy.
+      "description": "A String", # The description of this SnapshotSchedulePolicy.
+      "name": "A String", # Output only. The name of this SnapshotSchedulePolicy.
+      "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified.
+        { # A snapshot schedule.
+          "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots.
+          "prefix": "A String", # A string to prefix names of snapshots created under this Schedule.
+          "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule.
+        },
+      ],
+      "volumes": [ # The names of the Volumes this policy is associated with.
+        "A String",
+      ],
+    },
+  ],
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
+ +
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Update a SnapshotSchedulePolicy.
+
+Args:
+  name: string, Output only. The name of this SnapshotSchedulePolicy. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A snapshot schedule policy.
+  "description": "A String", # The description of this SnapshotSchedulePolicy.
+  "name": "A String", # Output only. The name of this SnapshotSchedulePolicy.
+  "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified.
+    { # A snapshot schedule.
+      "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots.
+      "prefix": "A String", # A string to prefix names of snapshots created under this Schedule.
+      "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule.
+    },
+  ],
+  "volumes": [ # The names of the Volumes this policy is associated with.
+    "A String",
+  ],
+}
+
+  updateMask: string, The list of fields to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A snapshot schedule policy.
+  "description": "A String", # The description of this SnapshotSchedulePolicy.
+  "name": "A String", # Output only. The name of this SnapshotSchedulePolicy.
+  "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified.
+    { # A snapshot schedule.
+      "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots.
+      "prefix": "A String", # A string to prefix names of snapshots created under this Schedule.
+      "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule.
+    },
+  ],
+  "volumes": [ # The names of the Volumes this policy is associated with.
+    "A String",
+  ],
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/baremetalsolution_v1alpha1.projects.sshKeys.html b/docs/dyn/baremetalsolution_v1alpha1.projects.sshKeys.html new file mode 100644 index 00000000000..8525e348eb7 --- /dev/null +++ b/docs/dyn/baremetalsolution_v1alpha1.projects.sshKeys.html @@ -0,0 +1,186 @@ + + + +

Bare Metal Solution API . projects . sshKeys

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, sshKeyId=None, x__xgafv=None)

+

Create a new SSH key registration in the specified project.

+

+ delete(name, x__xgafv=None)

+

Delete an SSH key registration in the specified project.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List the public SSH keys registered for the specified project.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, sshKeyId=None, x__xgafv=None) +
Create a new SSH key registration in the specified project.
+
+Args:
+  parent: string, Required. The parent project containing the SSH keys. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A public SSH key registered in the project. Used primarily for the interactive serial console feature.
+  "name": "A String", # Output only. The name of this SSH key.
+  "publicKey": "A String", # The public SSH key.
+}
+
+  sshKeyId: string, Required. The ID to use for the key, which will become the final component of the key's resource name. This value should match the regex: [a-zA-Z0-9@.\-_]{1,64}
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A public SSH key registered in the project. Used primarily for the interactive serial console feature.
+  "name": "A String", # Output only. The name of this SSH key.
+  "publicKey": "A String", # The public SSH key.
+}
+
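+
+A registration sketch; the key material is truncated and the names are hypothetical. Note that `sshKeyId` must match the regex documented above:
+
+    from googleapiclient.discovery import build
+
+    service = build('baremetalsolution', 'v1alpha1')
+    key = service.projects().sshKeys().create(
+        parent='projects/my-project',  # hypothetical
+        sshKeyId='alice-laptop',       # must match [a-zA-Z0-9@.\-_]{1,64}
+        body={'publicKey': 'ssh-ed25519 AAAA... alice@laptop'},  # truncated example key
+    ).execute()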
+ +
+ delete(name, x__xgafv=None) +
Delete an SSH key registration in the specified project.
+
+Args:
+  name: string, Required. The name of the SSH key to delete. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List the public SSH keys registered for the specified project.
+
+Args:
+  parent: string, Required. The parent project containing the SSH keys. (required)
+  pageSize: integer, The maximum number of items to return.
+  pageToken: string, The next_page_token value returned from a previous List request, if any.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ListSSHKeys.
+  "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list.
+  "sshKeys": [ # The SSH keys registered in this project.
+    { # A public SSH key registered in the project. Used primarily for the interactive serial console feature.
+      "name": "A String", # Output only. The name of this SSH key.
+      "publicKey": "A String", # The public SSH key.
+    },
+  ],
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
+ + \ No newline at end of file diff --git a/docs/dyn/civicinfo_v2.representatives.html b/docs/dyn/civicinfo_v2.representatives.html index 39376ef788d..20469123c3b 100644 --- a/docs/dyn/civicinfo_v2.representatives.html +++ b/docs/dyn/civicinfo_v2.representatives.html @@ -120,6 +120,7 @@

Method Details

judge - schoolBoard - specialPurposeOfficer - + otherRole - x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -238,6 +239,7 @@

Method Details

judge - schoolBoard - specialPurposeOfficer - + otherRole - x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/cloudasset_v1.v1.html b/docs/dyn/cloudasset_v1.v1.html index 1234919a8d4..ddec462bea1 100644 --- a/docs/dyn/cloudasset_v1.v1.html +++ b/docs/dyn/cloudasset_v1.v1.html @@ -99,7 +99,7 @@

Instance Methods

searchAllIamPolicies_next(previous_request, previous_response)

Retrieves the next page of results.

- searchAllResources(scope, assetTypes=None, orderBy=None, pageSize=None, pageToken=None, query=None, x__xgafv=None)

+ searchAllResources(scope, assetTypes=None, orderBy=None, pageSize=None, pageToken=None, query=None, readMask=None, x__xgafv=None)

Searches all Cloud resources within the specified scope, such as a project, folder, or organization. The caller must be granted the `cloudasset.assets.searchAllResources` permission on the desired scope, otherwise the request will be rejected.

searchAllResources_next(previous_request, previous_response)

@@ -1525,7 +1525,7 @@

Method Details

- searchAllResources(scope, assetTypes=None, orderBy=None, pageSize=None, pageToken=None, query=None, x__xgafv=None) + searchAllResources(scope, assetTypes=None, orderBy=None, pageSize=None, pageToken=None, query=None, readMask=None, x__xgafv=None)
Searches all Cloud resources within the specified scope, such as a project, folder, or organization. The caller must be granted the `cloudasset.assets.searchAllResources` permission on the desired scope, otherwise the request will be rejected.
 
 Args:
@@ -1535,6 +1535,7 @@ 

Method Details

pageSize: integer, Optional. The page size for search result pagination. Page size is capped at 500 even if a larger value is given. If set to zero, server will pick an appropriate default. Returned results may be fewer than requested. When this happens, there could be more results as long as `next_page_token` is returned. pageToken: string, Optional. If present, then retrieve the next batch of results from the preceding call to this method. `page_token` must be the value of `next_page_token` from the previous response. The values of all other method parameters, must be identical to those in the previous call. query: string, Optional. The query statement. See [how to construct a query](https://cloud.google.com/asset-inventory/docs/searching-resources#how_to_construct_a_query) for more information. If not specified or empty, it will search all the resources within the specified `scope`. Examples: * `name:Important` to find Cloud resources whose name contains "Important" as a word. * `name=Important` to find the Cloud resource whose name is exactly "Important". * `displayName:Impor*` to find Cloud resources whose display name contains "Impor" as a prefix of any word in the field. * `location:us-west*` to find Cloud resources whose location contains both "us" and "west" as prefixes. * `labels:prod` to find Cloud resources whose labels contain "prod" as a key or value. * `labels.env:prod` to find Cloud resources that have a label "env" and its value is "prod". * `labels.env:*` to find Cloud resources that have a label "env". * `kmsKey:key` to find Cloud resources encrypted with a customer-managed encryption key whose name contains the word "key". * `state:ACTIVE` to find Cloud resources whose state contains "ACTIVE" as a word. * `NOT state:ACTIVE` to find {{gcp_name}} resources whose state doesn't contain "ACTIVE" as a word. * `createTime<1609459200` to find Cloud resources that were created before "2021-01-01 00:00:00 UTC". 1609459200 is the epoch timestamp of "2021-01-01 00:00:00 UTC" in seconds. * `updateTime>1609459200` to find Cloud resources that were updated after "2021-01-01 00:00:00 UTC". 1609459200 is the epoch timestamp of "2021-01-01 00:00:00 UTC" in seconds. * `Important` to find Cloud resources that contain "Important" as a word in any of the searchable fields. * `Impor*` to find Cloud resources that contain "Impor" as a prefix of any word in any of the searchable fields. * `Important location:(us-west1 OR global)` to find Cloud resources that contain "Important" as a word in any of the searchable fields and are also located in the "us-west1" region or the "global" location. + readMask: string, Optional. A comma-separated list of fields specifying which fields to be returned in ResourceSearchResult. Only '*' or combination of top level fields can be specified. Field names of both snake_case and camelCase are supported. Examples: `"*"`, `"name,location"`, `"name,versionedResources"`. The read_mask paths must be valid field paths listed but not limited to (both snake_case and camelCase are supported): * name * asset_type or assetType * project * display_name or displayName * description * location * labels * network_tags or networkTags * kms_key or kmsKey * create_time or createTime * update_time or updateTime * state * additional_attributes or additionalAttributes * versioned_resources or versionedResources If read_mask is not specified, all fields except versionedResources will be returned. If only '*' is specified, all fields including versionedResources will be returned. 
Any invalid field path will trigger INVALID_ARGUMENT error. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -1572,6 +1573,14 @@

Method Details

"project": "A String", # The project that this resource belongs to, in the form of projects/{PROJECT_NUMBER}. This field is available when the resource belongs to a project. To search against `project`: * use a field query. Example: `project:12345` * use a free text query. Example: `12345` * specify the `scope` field as this project in your search request. "state": "A String", # The state of this resource. Different resources types have different state definitions that are mapped from various fields of different resource types. This field is available only when the resource's proto contains it. Example: If the resource is an instance provided by Compute Engine, its state will include PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. See `status` definition in [API Reference](https://cloud.google.com/compute/docs/reference/rest/v1/instances). If the resource is a project provided by Cloud Resource Manager, its state will include LIFECYCLE_STATE_UNSPECIFIED, ACTIVE, DELETE_REQUESTED and DELETE_IN_PROGRESS. See `lifecycleState` definition in [API Reference](https://cloud.google.com/resource-manager/reference/rest/v1/projects). To search against the `state`: * use a field query. Example: `state:RUNNING` * use a free text query. Example: `RUNNING` "updateTime": "A String", # The last update timestamp of this resource, at which the resource was last modified or deleted. The granularity is in seconds. Timestamp.nanos will always be 0. This field is available only when the resource's proto contains it. To search against `update_time`: * use a field query. - value in seconds since unix epoch. Example: `updateTime < 1609459200` - value in date string. Example: `updateTime < 2021-01-01` - value in date-time string (must be quoted). Example: `updateTime < "2021-01-01T00:00:00"` + "versionedResources": [ # Versioned resource representations of this resource. This is repeated because there could be multiple versions of resource representations during version migration. This `versioned_resources` field is not searchable. Some attributes of the resource representations are exposed in `additional_attributes` field, so as to allow users to search on them. + { # Resource representation as defined by the corresponding service providing the resource for a given API version. + "resource": { # JSON representation of the resource as defined by the corresponding service providing this resource. Example: If the resource is an instance provided by Compute Engine, this field will contain the JSON representation of the instance as defined by Compute Engine: `https://cloud.google.com/compute/docs/reference/rest/v1/instances`. You can find the resource definition for each supported resource type in this table: `https://cloud.google.com/asset-inventory/docs/supported-asset-types#searchable_asset_types` + "a_key": "", # Properties of the object. + }, + "version": "A String", # API version of the resource. Example: If the resource is an instance provided by Compute Engine v1 API as defined in `https://cloud.google.com/compute/docs/reference/rest/v1/instances`, version will be "v1". + }, + ], }, ], }
diff --git a/docs/dyn/cloudbuild_v1.projects.builds.html b/docs/dyn/cloudbuild_v1.projects.builds.html index 372f6262b84..b96a4131b2c 100644 --- a/docs/dyn/cloudbuild_v1.projects.builds.html +++ b/docs/dyn/cloudbuild_v1.projects.builds.html @@ -171,6 +171,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -185,7 +188,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -240,7 +243,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -274,7 +277,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -406,6 +409,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -420,7 +426,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -475,7 +481,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -509,7 +515,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -672,6 +678,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -686,7 +695,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -741,7 +750,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -775,7 +784,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -913,6 +922,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -927,7 +939,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -982,7 +994,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1016,7 +1028,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. diff --git a/docs/dyn/cloudbuild_v1.projects.locations.builds.html b/docs/dyn/cloudbuild_v1.projects.locations.builds.html index 9be4646a574..156abdcffdc 100644 --- a/docs/dyn/cloudbuild_v1.projects.locations.builds.html +++ b/docs/dyn/cloudbuild_v1.projects.locations.builds.html @@ -170,6 +170,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -184,7 +187,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -239,7 +242,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -273,7 +276,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -405,6 +408,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -419,7 +425,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -474,7 +480,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -508,7 +514,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -671,6 +677,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -685,7 +694,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -740,7 +749,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -774,7 +783,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -912,6 +921,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -926,7 +938,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -981,7 +993,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1015,7 +1027,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. diff --git a/docs/dyn/cloudbuild_v1.projects.locations.html b/docs/dyn/cloudbuild_v1.projects.locations.html index f0436f33a86..19eeb5511f3 100644 --- a/docs/dyn/cloudbuild_v1.projects.locations.html +++ b/docs/dyn/cloudbuild_v1.projects.locations.html @@ -89,6 +89,11 @@

Instance Methods

Returns the triggers Resource.

+ workerPools()

+ Returns the workerPools Resource.

close()

Close httplib2 connections.
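The new `workerPools()` accessor is reached through the locations resource. A sketch of listing pools in one region; the `list(parent=...)` method and response keys follow the usual pattern for this surface but are not documented on this page, so treat them as assumptions:

from googleapiclient.discovery import build

cloudbuild = build("cloudbuild", "v1")
parent = "projects/my-project/locations/us-central1"  # placeholder

# Assumed standard list method on the new workerPools resource.
resp = cloudbuild.projects().locations().workerPools().list(parent=parent).execute()
for pool in resp.get("workerPools", []):
    print(pool["name"])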

diff --git a/docs/dyn/cloudbuild_v1.projects.locations.triggers.html b/docs/dyn/cloudbuild_v1.projects.locations.triggers.html index 10ad8620a15..3fcbeba9228 100644 --- a/docs/dyn/cloudbuild_v1.projects.locations.triggers.html +++ b/docs/dyn/cloudbuild_v1.projects.locations.triggers.html @@ -117,6 +117,7 @@

Method Details

The object takes the form of: { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -168,6 +169,9 @@
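A sketch of creating a GitHub App trigger that leans on the new `autodetect` flag instead of an inline build or a config filename; the owner, repo, branch regex, and parent location are placeholder assumptions:

from googleapiclient.discovery import build

cloudbuild = build("cloudbuild", "v1")

trigger_body = {
    "name": "autodetect-trigger",
    "autodetect": True,  # resolves cloudbuild.yaml/.yml/.json, then Dockerfile
    "github": {
        "owner": "my-org",   # placeholder
        "name": "my-repo",   # placeholder
        "push": {"branch": "^main$"},
    },
}

created = cloudbuild.projects().locations().triggers().create(
    parent="projects/my-project/locations/global", body=trigger_body
).execute()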

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -182,7 +186,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -237,7 +241,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -271,7 +275,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -405,6 +409,7 @@

Method Details

An object of the form: { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -456,6 +461,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -470,7 +478,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -525,7 +533,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -559,7 +567,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -721,6 +729,7 @@

Method Details

An object of the form: { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -772,6 +781,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -786,7 +798,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -841,7 +853,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -875,7 +887,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1021,6 +1033,7 @@

Method Details

"nextPageToken": "A String", # Token to receive the next page of results. "triggers": [ # `BuildTriggers` for the project, sorted by `create_time` descending. { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -1072,6 +1085,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -1086,7 +1102,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -1141,7 +1157,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1175,7 +1191,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1326,6 +1342,7 @@

Method Details

The object takes the form of: { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile. Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -1377,6 +1394,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -1391,7 +1411,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -1446,7 +1466,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1480,7 +1500,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1615,6 +1635,7 @@

Method Details

An object of the form: { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile. Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -1666,6 +1687,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -1680,7 +1704,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -1735,7 +1759,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1769,7 +1793,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. diff --git a/docs/dyn/cloudbuild_v1.projects.locations.workerPools.html b/docs/dyn/cloudbuild_v1.projects.locations.workerPools.html new file mode 100644 index 00000000000..074e5bc57d7 --- /dev/null +++ b/docs/dyn/cloudbuild_v1.projects.locations.workerPools.html @@ -0,0 +1,369 @@ + + + +

Cloud Build API . projects . locations . workerPools

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, validateOnly=None, workerPoolId=None, x__xgafv=None)

+

Creates a `WorkerPool`.

+

+ delete(name, allowMissing=None, etag=None, validateOnly=None, x__xgafv=None)

+

Deletes a `WorkerPool`.

+

+ get(name, x__xgafv=None)

+

Returns details of a `WorkerPool`.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists `WorkerPool`s.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, validateOnly=None, x__xgafv=None)

+

Updates a `WorkerPool`.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, validateOnly=None, workerPoolId=None, x__xgafv=None) +
Creates a `WorkerPool`.
+
+Args:
+  parent: string, Required. The parent resource where this worker pool will be created. Format: `projects/{project}/locations/{location}`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Configuration for a `WorkerPool`. Cloud Build owns and maintains a pool of workers for general use; these workers have no access to a project's private network. By default, builds submitted to Cloud Build will use a worker from this pool. If your build needs access to resources on a private network, create and use a `WorkerPool` to run your builds. Private `WorkerPool`s give your builds access to any single VPC network that you administer, including any on-prem resources connected to that VPC network. For an overview of custom worker pools, see [Custom workers overview](https://cloud.google.com/cloud-build/docs/custom-workers/custom-workers-overview).
+  "annotations": { # User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
+    "a_key": "A String",
+  },
+  "createTime": "A String", # Output only. Time at which the request to create the `WorkerPool` was received.
+  "deleteTime": "A String", # Output only. Time at which the request to delete the `WorkerPool` was received.
+  "displayName": "A String", # A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters.
+  "etag": "A String", # Output only. Checksum computed by the server. May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding.
+  "name": "A String", # Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed.
+  "privatePoolV1Config": { # Configuration for a V1 `PrivatePool`. # Private Pool using a v1 configuration.
+    "networkConfig": { # Defines the network configuration for the pool. # Network configuration for the pool.
+      "egressOption": "A String", # Option to configure network egress for the workers.
+      "peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)
+    },
+    "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool.
+      "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size.
+      "machineType": "A String", # Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use a sensible default.
+    },
+  },
+  "state": "A String", # Output only. `WorkerPool` state.
+  "uid": "A String", # Output only. A unique identifier for the `WorkerPool`.
+  "updateTime": "A String", # Output only. Time at which the request to update the `WorkerPool` was received.
+}
+
+  validateOnly: boolean, If set, validate the request and preview the response, but do not actually post it.
+  workerPoolId: string, Required. Immutable. The ID to use for the `WorkerPool`, which will become the final component of the resource name. This value should be 1-63 characters, and valid characters are /a-z-/.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
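+A minimal sketch of calling this method with the Python discovery client (project, location, pool ID, and network values are placeholders; application-default credentials are assumed):
+
+  from googleapiclient.discovery import build
+
+  service = build("cloudbuild", "v1")
+  operation = service.projects().locations().workerPools().create(
+      parent="projects/my-project/locations/us-central1",
+      workerPoolId="my-pool",  # becomes the final component of the resource name
+      body={
+          "privatePoolV1Config": {
+              "workerConfig": {"machineType": "e2-medium", "diskSizeGb": "100"},
+              "networkConfig": {
+                  # Placeholder peered network; {project} must be a project number.
+                  "peeredNetwork": "projects/123456789/global/networks/my-vpc",
+              },
+          },
+      },
+  ).execute()
+  print(operation["name"])  # name of the long-running operation
+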
+ +
+ delete(name, allowMissing=None, etag=None, validateOnly=None, x__xgafv=None) +
Deletes a `WorkerPool`.
+
+Args:
+  name: string, Required. The name of the `WorkerPool` to delete. Format: `projects/{project}/locations/{location}/workerPools/{workerPool}`. (required)
+  allowMissing: boolean, If set to true, and the `WorkerPool` is not found, the request will succeed but no action will be taken on the server.
+  etag: string, Optional. If this is provided, it must match the server's etag on the workerpool for the request to be processed.
+  validateOnly: boolean, If set, validate the request and preview the response, but do not actually post it.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
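+A minimal sketch, assuming the pool name below is a placeholder and credentials are already configured:
+
+  from googleapiclient.discovery import build
+
+  service = build("cloudbuild", "v1")
+  operation = service.projects().locations().workerPools().delete(
+      name="projects/my-project/locations/us-central1/workerPools/my-pool",
+      allowMissing=True,  # succeed even if the pool was already deleted
+  ).execute()
+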
+ +
+ get(name, x__xgafv=None) +
Returns details of a `WorkerPool`.
+
+Args:
+  name: string, Required. The name of the `WorkerPool` to retrieve. Format: `projects/{project}/locations/{location}/workerPools/{workerPool}`. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Configuration for a `WorkerPool`. Cloud Build owns and maintains a pool of workers for general use; these workers have no access to a project's private network. By default, builds submitted to Cloud Build will use a worker from this pool. If your build needs access to resources on a private network, create and use a `WorkerPool` to run your builds. Private `WorkerPool`s give your builds access to any single VPC network that you administer, including any on-prem resources connected to that VPC network. For an overview of custom worker pools, see [Custom workers overview](https://cloud.google.com/cloud-build/docs/custom-workers/custom-workers-overview).
+  "annotations": { # User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
+    "a_key": "A String",
+  },
+  "createTime": "A String", # Output only. Time at which the request to create the `WorkerPool` was received.
+  "deleteTime": "A String", # Output only. Time at which the request to delete the `WorkerPool` was received.
+  "displayName": "A String", # A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters.
+  "etag": "A String", # Output only. Checksum computed by the server. May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding.
+  "name": "A String", # Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed.
+  "privatePoolV1Config": { # Configuration for a V1 `PrivatePool`. # Private Pool using a v1 configuration.
+    "networkConfig": { # Defines the network configuration for the pool. # Network configuration for the pool.
+      "egressOption": "A String", # Option to configure network egress for the workers.
+      "peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)
+    },
+    "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool.
+      "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size.
+      "machineType": "A String", # Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use a sensible default.
+    },
+  },
+  "state": "A String", # Output only. `WorkerPool` state.
+  "uid": "A String", # Output only. A unique identifier for the `WorkerPool`.
+  "updateTime": "A String", # Output only. Time at which the request to update the `WorkerPool` was received.
+}
+
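+A minimal sketch (placeholder pool name; credentials assumed):
+
+  from googleapiclient.discovery import build
+
+  service = build("cloudbuild", "v1")
+  pool = service.projects().locations().workerPools().get(
+      name="projects/my-project/locations/us-central1/workerPools/my-pool",
+  ).execute()
+  print(pool["state"], pool.get("displayName", ""))
+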
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists `WorkerPool`s.
+
+Args:
+  parent: string, Required. The parent of the collection of `WorkerPools`. Format: `projects/{project}/locations/{location}`. (required)
+  pageSize: integer, The maximum number of `WorkerPool`s to return. The service may return fewer than this value. If omitted, the server will use a sensible default.
+  pageToken: string, A page token, received from a previous `ListWorkerPools` call. Provide this to retrieve the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response containing existing `WorkerPools`.
+  "nextPageToken": "A String", # Continuation token used to page through large result sets. Provide this value in a subsequent ListWorkerPoolsRequest to return the next page of results.
+  "workerPools": [ # `WorkerPools` for the specified project.
+    { # Configuration for a `WorkerPool`. Cloud Build owns and maintains a pool of workers for general use; these workers have no access to a project's private network. By default, builds submitted to Cloud Build will use a worker from this pool. If your build needs access to resources on a private network, create and use a `WorkerPool` to run your builds. Private `WorkerPool`s give your builds access to any single VPC network that you administer, including any on-prem resources connected to that VPC network. For an overview of custom worker pools, see [Custom workers overview](https://cloud.google.com/cloud-build/docs/custom-workers/custom-workers-overview).
+      "annotations": { # User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
+        "a_key": "A String",
+      },
+      "createTime": "A String", # Output only. Time at which the request to create the `WorkerPool` was received.
+      "deleteTime": "A String", # Output only. Time at which the request to delete the `WorkerPool` was received.
+      "displayName": "A String", # A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters.
+      "etag": "A String", # Output only. Checksum computed by the server. May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding.
+      "name": "A String", # Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed.
+      "privatePoolV1Config": { # Configuration for a V1 `PrivatePool`. # Private Pool using a v1 configuration.
+        "networkConfig": { # Defines the network configuration for the pool. # Network configuration for the pool.
+          "egressOption": "A String", # Option to configure network egress for the workers.
+          "peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)
+        },
+        "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool.
+          "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size.
+          "machineType": "A String", # Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use a sensible default.
+        },
+      },
+      "state": "A String", # Output only. `WorkerPool` state.
+      "uid": "A String", # Output only. A unique identifier for the `WorkerPool`.
+      "updateTime": "A String", # Output only. Time at which the request to update the `WorkerPool` was received.
+    },
+  ],
+}
+
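+A minimal sketch of listing the pools in one location (placeholder parent; credentials assumed):
+
+  from googleapiclient.discovery import build
+
+  service = build("cloudbuild", "v1")
+  response = service.projects().locations().workerPools().list(
+      parent="projects/my-project/locations/us-central1",
+      pageSize=50,
+  ).execute()
+  for pool in response.get("workerPools", []):
+      print(pool["name"], pool["state"])
+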
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
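+A sketch of the usual pagination loop built on list() and list_next() (placeholder parent; credentials assumed):
+
+  from googleapiclient.discovery import build
+
+  service = build("cloudbuild", "v1")
+  pools = service.projects().locations().workerPools()
+  request = pools.list(parent="projects/my-project/locations/us-central1")
+  while request is not None:
+      response = request.execute()
+      for pool in response.get("workerPools", []):
+          print(pool["name"])
+      request = pools.list_next(request, response)  # None once all pages are consumed
+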
+ +
+ patch(name, body=None, updateMask=None, validateOnly=None, x__xgafv=None) +
Updates a `WorkerPool`.
+
+Args:
+  name: string, Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Configuration for a `WorkerPool`. Cloud Build owns and maintains a pool of workers for general use; these workers have no access to a project's private network. By default, builds submitted to Cloud Build will use a worker from this pool. If your build needs access to resources on a private network, create and use a `WorkerPool` to run your builds. Private `WorkerPool`s give your builds access to any single VPC network that you administer, including any on-prem resources connected to that VPC network. For an overview of custom worker pools, see [Custom workers overview](https://cloud.google.com/cloud-build/docs/custom-workers/custom-workers-overview).
+  "annotations": { # User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
+    "a_key": "A String",
+  },
+  "createTime": "A String", # Output only. Time at which the request to create the `WorkerPool` was received.
+  "deleteTime": "A String", # Output only. Time at which the request to delete the `WorkerPool` was received.
+  "displayName": "A String", # A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters.
+  "etag": "A String", # Output only. Checksum computed by the server. May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding.
+  "name": "A String", # Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed.
+  "privatePoolV1Config": { # Configuration for a V1 `PrivatePool`. # Private Pool using a v1 configuration.
+    "networkConfig": { # Defines the network configuration for the pool. # Network configuration for the pool.
+      "egressOption": "A String", # Option to configure network egress for the workers.
+      "peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)
+    },
+    "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool.
+      "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size.
+      "machineType": "A String", # Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use a sensible default.
+    },
+  },
+  "state": "A String", # Output only. `WorkerPool` state.
+  "uid": "A String", # Output only. A unique identifier for the `WorkerPool`.
+  "updateTime": "A String", # Output only. Time at which the request to update the `WorkerPool` was received.
+}
+
+  updateMask: string, A mask specifying which fields in `worker_pool` to update.
+  validateOnly: boolean, If set, validate the request and preview the response, but do not actually post it.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
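+A minimal sketch that renames a pool (placeholder name; the updateMask restricts the change to the fields listed):
+
+  from googleapiclient.discovery import build
+
+  service = build("cloudbuild", "v1")
+  operation = service.projects().locations().workerPools().patch(
+      name="projects/my-project/locations/us-central1/workerPools/my-pool",
+      updateMask="displayName",  # only fields named in the mask are modified
+      body={"displayName": "Renamed pool"},
+  ).execute()
+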
+ + \ No newline at end of file diff --git a/docs/dyn/cloudbuild_v1.projects.triggers.html b/docs/dyn/cloudbuild_v1.projects.triggers.html index 2ec94dc6d79..757a91178bf 100644 --- a/docs/dyn/cloudbuild_v1.projects.triggers.html +++ b/docs/dyn/cloudbuild_v1.projects.triggers.html @@ -117,6 +117,7 @@

Method Details

The object takes the form of: { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile. Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -168,6 +169,9 @@
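A hedged sketch of creating such a trigger with `autodetect` enabled (project, owner, repo, and branch values are placeholders, and the `github` block is an assumed GitHub App trigger configuration, since autodetect is only available for GitHub App triggers):

    from googleapiclient.discovery import build

    service = build("cloudbuild", "v1")
    trigger = service.projects().triggers().create(
        projectId="my-project",
        body={
            "name": "autodetect-trigger",
            "autodetect": True,  # cloudbuild.yaml/.yml/.json, then Dockerfile
            "github": {  # assumed GitHubEventsConfig fields
                "owner": "my-org",
                "name": "my-repo",
                "push": {"branch": "^main$"},
            },
        },
    ).execute()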

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -182,7 +186,7 @@

Method Details

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -237,7 +241,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -271,7 +275,7 @@

Method Details

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -405,6 +409,7 @@

Method Details

An object of the form: { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile. Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -456,6 +461,9 @@

Method Details

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -470,7 +478,7 @@

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -525,7 +533,7 @@

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -559,7 +567,7 @@

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -721,6 +729,7 @@

An object of the form: { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case-insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile. Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -772,6 +781,9 @@

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -786,7 +798,7 @@

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -841,7 +853,7 @@

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -875,7 +887,7 @@

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1021,6 +1033,7 @@

"nextPageToken": "A String", # Token to receive the next page of results. "triggers": [ # `BuildTriggers` for the project, sorted by `create_time` descending. { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -1072,6 +1085,9 @@

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -1086,7 +1102,7 @@

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -1141,7 +1157,7 @@

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1175,7 +1191,7 @@

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1327,6 +1343,7 @@

The object takes the form of: { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case-insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile. Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -1378,6 +1395,9 @@

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -1392,7 +1412,7 @@

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -1447,7 +1467,7 @@

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1481,7 +1501,7 @@

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1614,6 +1634,7 @@

An object of the form: { # Configuration for an automated build in response to source repository changes. + "autodetect": True or False, # Autodetect build configuration. The following precedence is used (case-insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile. Currently only available for GitHub App Triggers. "build": { # A build resource in the Cloud Build API. At a high level, a `Build` describes where to find source code, how to build it (for example, the builder image to run on the source), and where to store the built artifacts. Fields can include the following variables, which will be expanded when the build is created: - $PROJECT_ID: the project ID of the build. - $PROJECT_NUMBER: the project number of the build. - $BUILD_ID: the autogenerated ID of the build. - $REPO_NAME: the source repository name specified by RepoSource. - $BRANCH_NAME: the branch name specified by RepoSource. - $TAG_NAME: the tag name specified by RepoSource. - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or resolved from the specified branch or tag. - $SHORT_SHA: first 7 characters of $REVISION_ID or $COMMIT_SHA. # Contents of the build template. "artifacts": { # Artifacts produced by a build that should be uploaded upon successful completion of all build steps. # Artifacts produced by the build that should be uploaded upon successful completion of all build steps. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images will be pushed using the builder service account's credentials. The digests of the pushed images will be stored in the Build resource's results field. If any of the images fail to be pushed, the build is marked FAILURE. @@ -1665,6 +1686,9 @@

"logStreamingOption": "A String", # Option to define build log streaming behavior to Google Cloud Storage. "logging": "A String", # Option to specify the logging mode, which determines if and where build logs are stored. "machineType": "A String", # Compute Engine machine type on which to run the build. + "pool": { # Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. # Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information. + "name": "A String", # The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} + }, "requestedVerifyOption": "A String", # Requested verifiability options. "secretEnv": [ # A list of global environment variables, which are encrypted using a Cloud Key Management Service crypto key. These values must be specified in the build's `Secret`. These variables will be available to all build steps in this build. "A String", @@ -1679,7 +1703,7 @@

"path": "A String", # Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. }, ], - "workerPool": "A String", # Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users. + "workerPool": "A String", # This field deprecated; please use `pool.name` instead. }, "projectId": "A String", # Output only. ID of the project. "queueTtl": "A String", # TTL in queue for this build. If provided and the build is enqueued longer than this value, the build will expire and the build status will be `EXPIRED`. The TTL starts ticking from create_time. @@ -1734,7 +1758,7 @@

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview. + "storageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. @@ -1768,7 +1792,7 @@

"generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source. This object must be a gzipped archive file (`.tar.gz`) containing source to build. }, - "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview. # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. + "resolvedStorageSourceManifest": { # Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). # A copy of the build's `source.storage_source_manifest`, if exists, with any revisions resolved. This feature is in Preview. "bucket": "A String", # Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). "generation": "A String", # Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. "object": "A String", # Google Cloud Storage object containing the source manifest. This object must be a JSON file. diff --git a/docs/dyn/cloudchannel_v1.accounts.channelPartnerLinks.customers.html b/docs/dyn/cloudchannel_v1.accounts.channelPartnerLinks.customers.html index 86f97d2ad9f..b0f1ab64a5e 100644 --- a/docs/dyn/cloudchannel_v1.accounts.channelPartnerLinks.customers.html +++ b/docs/dyn/cloudchannel_v1.accounts.channelPartnerLinks.customers.html @@ -111,7 +111,7 @@

The object takes the form of: { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -152,9 +152,9 @@

}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, @@ -170,7 +170,7 @@

An object of the form: { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -211,9 +211,9 @@

}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, @@ -254,7 +254,7 @@

An object of the form: { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -295,9 +295,9 @@

}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, @@ -324,7 +324,7 @@

{ # Response message for CloudChannelService.ListCustomers. "customers": [ # The customers belonging to a reseller or distributor. { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -365,9 +365,9 @@
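The ListCustomers response pages the same way as other list methods. A short sketch (placeholder account and partner IDs) of iterating every customer for a channel partner link:

```python
from googleapiclient import discovery

service = discovery.build("cloudchannel", "v1")
customers = service.accounts().channelPartnerLinks().customers()

request = customers.list(
    parent="accounts/C0123456/channelPartnerLinks/C7654321",  # placeholder IDs
    pageSize=50,
)
while request is not None:
    response = request.execute()
    for customer in response.get("customers", []):
        print(customer["name"], customer.get("orgDisplayName", ""))
    request = customers.list_next(request, response)
```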

}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, @@ -402,7 +402,7 @@

The object takes the form of: { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -443,9 +443,9 @@

}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, @@ -462,7 +462,7 @@

An object of the form: { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -503,9 +503,9 @@

}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, diff --git a/docs/dyn/cloudchannel_v1.accounts.customers.html b/docs/dyn/cloudchannel_v1.accounts.customers.html index 3627e55b843..77437554299 100644 --- a/docs/dyn/cloudchannel_v1.accounts.customers.html +++ b/docs/dyn/cloudchannel_v1.accounts.customers.html @@ -137,7 +137,7 @@

The object takes the form of: { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -178,9 +178,9 @@

}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, @@ -196,7 +196,7 @@

An object of the form: { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -237,9 +237,9 @@

}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, @@ -280,7 +280,7 @@

An object of the form: { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -321,9 +321,9 @@

}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, @@ -350,7 +350,7 @@

{ # Response message for CloudChannelService.ListCustomers. "customers": [ # The customers belonging to a reseller or distributor. { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -391,9 +391,9 @@

}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, @@ -702,7 +702,7 @@

The object takes the form of: { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -743,9 +743,9 @@


}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, @@ -762,7 +762,7 @@


An object of the form: { # Entity representing a customer of a reseller or distributor. - "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. + "alternateEmail": "A String", # Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers. "channelPartnerId": "A String", # Cloud Identity ID of the customer's channel partner. Populated only if a channel partner exists for this customer. "cloudIdentityId": "A String", # Output only. The customer's Cloud Identity ID if the customer has a Cloud Identity resource. "cloudIdentityInfo": { # Cloud Identity information for the Cloud Channel Customer. # Output only. Cloud Identity information for the customer. Populated only if a Cloud Identity account exists for this customer. @@ -803,9 +803,9 @@


}, "primaryContactInfo": { # Contact information for a customer account. # Primary contact info. "displayName": "A String", # Output only. The customer account contact's display name, formatted as a combination of the customer's first and last name. - "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. - "firstName": "A String", # The customer account contact's first name. - "lastName": "A String", # The customer account contact's last name. + "email": "A String", # The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers. + "firstName": "A String", # The customer account contact's first name. Optional for Team customers. + "lastName": "A String", # The customer account contact's last name. Optional for Team customers. "phone": "A String", # The customer account's contact phone number. "title": "A String", # Optional. The customer account contact's job title. }, diff --git a/docs/dyn/cloudidentity_v1.groups.html b/docs/dyn/cloudidentity_v1.groups.html index 9624fbde9e3..cfb73694112 100644 --- a/docs/dyn/cloudidentity_v1.groups.html +++ b/docs/dyn/cloudidentity_v1.groups.html @@ -389,7 +389,7 @@


"updateTime": "A String", # Output only. The time when the `Group` was last updated. } - updateMask: string, Required. The fully-qualified names of fields to update. May only contain the following fields: `display_name`, `description`, `labels`. + updateMask: string, Required. The names of fields to update. May only contain the following fields: `display_name`, `description`, `labels`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/cloudidentity_v1beta1.groups.html b/docs/dyn/cloudidentity_v1beta1.groups.html index 4b8050758e3..ddec42dab3d 100644 --- a/docs/dyn/cloudidentity_v1beta1.groups.html +++ b/docs/dyn/cloudidentity_v1beta1.groups.html @@ -441,7 +441,7 @@


"updateTime": "A String", # Output only. The time when the `Group` was last updated. } - updateMask: string, Required. The fully-qualified names of fields to update. May only contain the following fields: `display_name`, `description`, `labels`. + updateMask: string, Required. The names of fields to update. May only contain the following fields: `display_name`, `description`, `labels`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/composer_v1beta1.projects.locations.environments.html b/docs/dyn/composer_v1beta1.projects.locations.environments.html index 7519b7367ba..4543893a354 100644 --- a/docs/dyn/composer_v1beta1.projects.locations.environments.html +++ b/docs/dyn/composer_v1beta1.projects.locations.environments.html @@ -168,6 +168,7 @@


"encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. "kmsKeyName": "A String", # Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. }, + "environmentSize": "A String", # Optional. The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "gkeCluster": "A String", # Output only. The Kubernetes Engine cluster used to run this environment. "maintenanceWindow": { # The configuration settings for Cloud Composer maintenance window. The following example: { "startTime":"2019-08-01T01:00:00Z" "endTime":"2019-08-01T07:00:00Z" "recurrence":"FREQ=WEEKLY;BYDAY=TU,WE" } would define a maintenance window between 01 and 07 hours UTC during each Tuesday and Wednesday. # Optional. The maintenance window is the period when Cloud Composer components may undergo maintenance. It is defined so that maintenance is not executed during peak hours or critical time periods. The system will not be under maintenance for every occurrence of this window, but when maintenance is planned, it will be scheduled during the window. The maintenance window period must encompass at least 12 hours per week. This may be split into multiple chunks, each with a size of at least 4 hours. If this value is omitted, Cloud Composer components may be subject to maintenance at any time. "endTime": "A String", # Required. Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end_time must be in the future, relative to `start_time`. @@ -190,7 +191,7 @@


"oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. "A String", ], - "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the workloads. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated . + "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the workloads. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. "subnetwork": "A String", # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. "A String", @@ -198,6 +199,8 @@


}, "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. "privateEnvironmentConfig": { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment. + "cloudComposerNetworkIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + "cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true . "privateClusterConfig": { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment. @@ -232,6 +235,26 @@


}, ], }, + "workloadsConfig": { # The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. # Optional. The workloads configuration settings for the GKE cluster associated with the Cloud Composer environment. The GKE cluster runs Airflow scheduler, web server and workers workloads. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + "scheduler": { # Configuration for resources used by Airflow schedulers. # Optional. Resources used by Airflow schedulers. + "count": 42, # Optional. The number of schedulers. + "cpu": 3.14, # Optional. CPU request and limit for a single Airflow scheduler replica. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for a single Airflow scheduler replica. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for a single Airflow scheduler replica. + }, + "webServer": { # Configuration for resources used by Airflow web server. # Optional. Resources used by Airflow web server. + "cpu": 3.14, # Optional. CPU request and limit for Airflow web server. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for Airflow web server. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for Airflow web server. + }, + "worker": { # Configuration for resources used by Airflow workers. # Optional. Resources used by Airflow workers. + "cpu": 3.14, # Optional. CPU request and limit for a single Airflow worker replica. + "maxCount": 42, # Optional. Maximum number of workers for autoscaling. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for a single Airflow worker replica. + "minCount": 42, # Optional. Minimum number of workers for autoscaling. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for a single Airflow worker replica. + }, + }, }, "createTime": "A String", # Output only. The time at which this environment was created. "labels": { # Optional. User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. @@ -331,6 +354,7 @@


"encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. "kmsKeyName": "A String", # Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. }, + "environmentSize": "A String", # Optional. The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "gkeCluster": "A String", # Output only. The Kubernetes Engine cluster used to run this environment. "maintenanceWindow": { # The configuration settings for Cloud Composer maintenance window. The following example: { "startTime":"2019-08-01T01:00:00Z" "endTime":"2019-08-01T07:00:00Z" "recurrence":"FREQ=WEEKLY;BYDAY=TU,WE" } would define a maintenance window between 01 and 07 hours UTC during each Tuesday and Wednesday. # Optional. The maintenance window is the period when Cloud Composer components may undergo maintenance. It is defined so that maintenance is not executed during peak hours or critical time periods. The system will not be under maintenance for every occurrence of this window, but when maintenance is planned, it will be scheduled during the window. The maintenance window period must encompass at least 12 hours per week. This may be split into multiple chunks, each with a size of at least 4 hours. If this value is omitted, Cloud Composer components may be subject to maintenance at any time. "endTime": "A String", # Required. Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end_time must be in the future, relative to `start_time`. @@ -353,7 +377,7 @@


"oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. "A String", ], - "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the workloads. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated . + "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the workloads. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. "subnetwork": "A String", # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. "A String", @@ -361,6 +385,8 @@


}, "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. "privateEnvironmentConfig": { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment. + "cloudComposerNetworkIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + "cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true . "privateClusterConfig": { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment. @@ -395,6 +421,26 @@


}, ], }, + "workloadsConfig": { # The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. # Optional. The workloads configuration settings for the GKE cluster associated with the Cloud Composer environment. The GKE cluster runs Airflow scheduler, web server and workers workloads. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + "scheduler": { # Configuration for resources used by Airflow schedulers. # Optional. Resources used by Airflow schedulers. + "count": 42, # Optional. The number of schedulers. + "cpu": 3.14, # Optional. CPU request and limit for a single Airflow scheduler replica. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for a single Airflow scheduler replica. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for a single Airflow scheduler replica. + }, + "webServer": { # Configuration for resources used by Airflow web server. # Optional. Resources used by Airflow web server. + "cpu": 3.14, # Optional. CPU request and limit for Airflow web server. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for Airflow web server. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for Airflow web server. + }, + "worker": { # Configuration for resources used by Airflow workers. # Optional. Resources used by Airflow workers. + "cpu": 3.14, # Optional. CPU request and limit for a single Airflow worker replica. + "maxCount": 42, # Optional. Maximum number of workers for autoscaling. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for a single Airflow worker replica. + "minCount": 42, # Optional. Minimum number of workers for autoscaling. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for a single Airflow worker replica. + }, + }, }, "createTime": "A String", # Output only. The time at which this environment was created. "labels": { # Optional. User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. @@ -435,6 +481,7 @@


"encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. "kmsKeyName": "A String", # Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. }, + "environmentSize": "A String", # Optional. The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "gkeCluster": "A String", # Output only. The Kubernetes Engine cluster used to run this environment. "maintenanceWindow": { # The configuration settings for Cloud Composer maintenance window. The following example: { "startTime":"2019-08-01T01:00:00Z" "endTime":"2019-08-01T07:00:00Z" "recurrence":"FREQ=WEEKLY;BYDAY=TU,WE" } would define a maintenance window between 01 and 07 hours UTC during each Tuesday and Wednesday. # Optional. The maintenance window is the period when Cloud Composer components may undergo maintenance. It is defined so that maintenance is not executed during peak hours or critical time periods. The system will not be under maintenance for every occurrence of this window, but when maintenance is planned, it will be scheduled during the window. The maintenance window period must encompass at least 12 hours per week. This may be split into multiple chunks, each with a size of at least 4 hours. If this value is omitted, Cloud Composer components may be subject to maintenance at any time. "endTime": "A String", # Required. Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end_time must be in the future, relative to `start_time`. @@ -457,7 +504,7 @@


"oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. "A String", ], - "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the workloads. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated . + "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the workloads. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. "subnetwork": "A String", # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. "A String", @@ -465,6 +512,8 @@


}, "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. "privateEnvironmentConfig": { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment. + "cloudComposerNetworkIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + "cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true . "privateClusterConfig": { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment. @@ -499,6 +548,26 @@


}, ], }, + "workloadsConfig": { # The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. # Optional. The workloads configuration settings for the GKE cluster associated with the Cloud Composer environment. The GKE cluster runs Airflow scheduler, web server and workers workloads. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + "scheduler": { # Configuration for resources used by Airflow schedulers. # Optional. Resources used by Airflow schedulers. + "count": 42, # Optional. The number of schedulers. + "cpu": 3.14, # Optional. CPU request and limit for a single Airflow scheduler replica. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for a single Airflow scheduler replica. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for a single Airflow scheduler replica. + }, + "webServer": { # Configuration for resources used by Airflow web server. # Optional. Resources used by Airflow web server. + "cpu": 3.14, # Optional. CPU request and limit for Airflow web server. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for Airflow web server. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for Airflow web server. + }, + "worker": { # Configuration for resources used by Airflow workers. # Optional. Resources used by Airflow workers. + "cpu": 3.14, # Optional. CPU request and limit for a single Airflow worker replica. + "maxCount": 42, # Optional. Maximum number of workers for autoscaling. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for a single Airflow worker replica. + "minCount": 42, # Optional. Minimum number of workers for autoscaling. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for a single Airflow worker replica. + }, + }, }, "createTime": "A String", # Output only. The time at which this environment was created. "labels": { # Optional. User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. @@ -547,6 +616,7 @@


"encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. "kmsKeyName": "A String", # Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. }, + "environmentSize": "A String", # Optional. The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "gkeCluster": "A String", # Output only. The Kubernetes Engine cluster used to run this environment. "maintenanceWindow": { # The configuration settings for Cloud Composer maintenance window. The following example: { "startTime":"2019-08-01T01:00:00Z" "endTime":"2019-08-01T07:00:00Z" "recurrence":"FREQ=WEEKLY;BYDAY=TU,WE" } would define a maintenance window between 01 and 07 hours UTC during each Tuesday and Wednesday. # Optional. The maintenance window is the period when Cloud Composer components may undergo maintenance. It is defined so that maintenance is not executed during peak hours or critical time periods. The system will not be under maintenance for every occurrence of this window, but when maintenance is planned, it will be scheduled during the window. The maintenance window period must encompass at least 12 hours per week. This may be split into multiple chunks, each with a size of at least 4 hours. If this value is omitted, Cloud Composer components may be subject to maintenance at any time. "endTime": "A String", # Required. Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end_time must be in the future, relative to `start_time`. @@ -569,7 +639,7 @@


"oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. "A String", ], - "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the workloads. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated . + "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the workloads. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. "subnetwork": "A String", # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. "A String", @@ -577,6 +647,8 @@


}, "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. "privateEnvironmentConfig": { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment. + "cloudComposerNetworkIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + "cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true . "privateClusterConfig": { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment. @@ -611,6 +683,26 @@


}, ], }, + "workloadsConfig": { # The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. # Optional. The workloads configuration settings for the GKE cluster associated with the Cloud Composer environment. The GKE cluster runs Airflow scheduler, web server and workers workloads. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. + "scheduler": { # Configuration for resources used by Airflow schedulers. # Optional. Resources used by Airflow schedulers. + "count": 42, # Optional. The number of schedulers. + "cpu": 3.14, # Optional. CPU request and limit for a single Airflow scheduler replica. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for a single Airflow scheduler replica. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for a single Airflow scheduler replica. + }, + "webServer": { # Configuration for resources used by Airflow web server. # Optional. Resources used by Airflow web server. + "cpu": 3.14, # Optional. CPU request and limit for Airflow web server. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for Airflow web server. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for Airflow web server. + }, + "worker": { # Configuration for resources used by Airflow workers. # Optional. Resources used by Airflow workers. + "cpu": 3.14, # Optional. CPU request and limit for a single Airflow worker replica. + "maxCount": 42, # Optional. Maximum number of workers for autoscaling. + "memoryGb": 3.14, # Optional. Memory (GB) request and limit for a single Airflow worker replica. + "minCount": 42, # Optional. Minimum number of workers for autoscaling. + "storageGb": 3.14, # Optional. Storage (GB) request and limit for a single Airflow worker replica. + }, + }, }, "createTime": "A String", # Output only. The time at which this environment was created. "labels": { # Optional. User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size. @@ -622,7 +714,7 @@


"uuid": "A String", # Output only. The UUID (Universally Unique IDentifier) associated with this environment. This value is generated when the environment is created. } - updateMask: string, Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of argparse, the `updateMask` parameter would include the following two `paths` values: "config.softwareConfig.pypiPackages.scikit-learn" and "config.softwareConfig.pypiPackages.argparse". The included patch environment would specify the scikit-learn version as follows: { "config":{ "softwareConfig":{ "pypiPackages":{ "scikit-learn":"==0.19.0" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and argparse will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels "label1" and "label2" while clearing "label3" (assuming it already exists), one can provide the paths "labels.label1", "labels.label2", and "labels.label3" and populate the patch environment as follows: { "labels":{ "label1":"new-label1-value" "label2":"new-label2-value" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path "config.softwareConfig.pypiPackages", and the patch environment would be the following: { "config":{ "softwareConfig":{ "pypiPackages":{ "botocore":"==1.7.14" } } } } *Note:* Only the following fields can be updated: * config.softwareConfig.pypiPackages * Replace all custom custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * config.softwareConfig.pypiPackages.packagename * Update the custom PyPI package packagename, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the "config.softwareConfig.pypiPackages" mask. * labels * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * labels.labelName * Set the label named labelName, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the "labels" mask. * config.nodeCount * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. * config.webServerNetworkAccessControl * Replace the environment's current WebServerNetworkAccessControl. 
* config.softwareConfig.airflowConfigOverrides * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * config.softwareConfig.airflowConfigOverrides.section- name * Override the Apache Airflow config property name in the section named section, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the "config.softwareConfig.airflowConfigOverrides" mask. * config.softwareConfig.envVariables * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * config.softwareConfig.imageVersion * Upgrade the version of the environment in-place. Refer to `SoftwareConfig.image_version` for information on how to format the new image version. Additionally, the new image version cannot effect a version downgrade and must match the current image version's Composer major version and Airflow major and minor versions. Consult the Cloud Composer Version List for valid values. * config.databaseConfig.machineType * Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. * config.webServerConfig.machineType * Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. * config.maintenanceWindow * Maintenance window during which Cloud Composer components may be under maintenance. + updateMask: string, Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of argparse, the `updateMask` parameter would include the following two `paths` values: "config.softwareConfig.pypiPackages.scikit-learn" and "config.softwareConfig.pypiPackages.argparse". The included patch environment would specify the scikit-learn version as follows: { "config":{ "softwareConfig":{ "pypiPackages":{ "scikit-learn":"==0.19.0" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and argparse will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels "label1" and "label2" while clearing "label3" (assuming it already exists), one can provide the paths "labels.label1", "labels.label2", and "labels.label3" and populate the patch environment as follows: { "labels":{ "label1":"new-label1-value" "label2":"new-label2-value" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. 
For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path "config.softwareConfig.pypiPackages", and the patch environment would be the following: { "config":{ "softwareConfig":{ "pypiPackages":{ "botocore":"==1.7.14" } } } } *Note:* Only the following fields can be updated: * config.softwareConfig.pypiPackages * Replace all custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * config.softwareConfig.pypiPackages.packagename * Update the custom PyPI package packagename, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the "config.softwareConfig.pypiPackages" mask. * labels * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * labels.labelName * Set the label named labelName, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the "labels" mask. * config.nodeCount * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. * config.webServerNetworkAccessControl * Replace the environment's current WebServerNetworkAccessControl. * config.softwareConfig.airflowConfigOverrides * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * config.softwareConfig.airflowConfigOverrides.section-name * Override the Apache Airflow config property name in the section named section, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the "config.softwareConfig.airflowConfigOverrides" mask. * config.softwareConfig.envVariables * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * config.softwareConfig.imageVersion * Upgrade the version of the environment in-place. Refer to `SoftwareConfig.image_version` for information on how to format the new image version. Additionally, the new image version cannot effect a version downgrade and must match the current image version's Composer major version and Airflow major and minor versions. Consult the Cloud Composer Version List for valid values. * config.softwareConfig.schedulerCount * Horizontally scale the number of schedulers in Airflow. A positive integer not greater than the number of nodes must be provided in the `config.softwareConfig.schedulerCount` field. * config.databaseConfig.machineType * Cloud SQL machine type used by Airflow database.
It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. * config.webServerConfig.machineType * Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. * config.maintenanceWindow * Maintenance window during which Cloud Composer components may be under maintenance. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/container_v1.projects.locations.clusters.html b/docs/dyn/container_v1.projects.locations.clusters.html index ef9c060c2b5..17ef77b5cca 100644 --- a/docs/dyn/container_v1.projects.locations.clusters.html +++ b/docs/dyn/container_v1.projects.locations.clusters.html @@ -2587,6 +2587,10 @@
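The updated text adds config.softwareConfig.schedulerCount as a patchable path. A sketch of the corresponding patch call (composer v1beta1; the environment name is a placeholder):

from googleapiclient.discovery import build

service = build('composer', 'v1beta1')

operation = service.projects().locations().environments().patch(
    name='projects/my-project/locations/us-central1/environments/env-1',
    # Newly documented path: horizontally scale Airflow schedulers.
    updateMask='config.softwareConfig.schedulerCount',
    body={'config': {'softwareConfig': {'schedulerCount': 2}}},
).execute()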


"disabled": True or False, # Whether NetworkPolicy is enabled for this cluster. }, }, + "desiredAuthenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # The desired authenticator groups config for the cluster. + "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. + "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. + }, "desiredAutopilot": { # Autopilot is the configuration for Autopilot settings on the cluster. # The desired Autopilot configuration for the cluster. "enabled": True or False, # Enable Autopilot }, diff --git a/docs/dyn/container_v1.projects.zones.clusters.html b/docs/dyn/container_v1.projects.zones.clusters.html index d42d6d3930d..75a42f937d1 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.html +++ b/docs/dyn/container_v1.projects.zones.clusters.html @@ -2648,6 +2648,10 @@


"disabled": True or False, # Whether NetworkPolicy is enabled for this cluster. }, }, + "desiredAuthenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # The desired authenticator groups config for the cluster. + "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. + "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. + }, "desiredAutopilot": { # Autopilot is the configuration for Autopilot settings on the cluster. # The desired Autopilot configuration for the cluster. "enabled": True or False, # Enable Autopilot }, diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.html b/docs/dyn/container_v1beta1.projects.locations.clusters.html index 1a07e5d7ec4..b3c44980b14 100644 --- a/docs/dyn/container_v1beta1.projects.locations.clusters.html +++ b/docs/dyn/container_v1beta1.projects.locations.clusters.html @@ -2775,6 +2775,10 @@


"disabled": True or False, # Whether NetworkPolicy is enabled for this cluster. }, }, + "desiredAuthenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # AuthenticatorGroupsConfig specifies the config for the cluster security groups settings. + "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. + "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. + }, "desiredAutopilot": { # Autopilot is the configuration for Autopilot settings on the cluster. # The desired Autopilot configuration for the cluster. "enabled": True or False, # Enable Autopilot }, diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.html b/docs/dyn/container_v1beta1.projects.zones.clusters.html index aae3d65bb6d..509157d1591 100644 --- a/docs/dyn/container_v1beta1.projects.zones.clusters.html +++ b/docs/dyn/container_v1beta1.projects.zones.clusters.html @@ -2836,6 +2836,10 @@


"disabled": True or False, # Whether NetworkPolicy is enabled for this cluster. }, }, + "desiredAuthenticatorGroupsConfig": { # Configuration for returning group information from authenticators. # AuthenticatorGroupsConfig specifies the config for the cluster security groups settings. + "enabled": True or False, # Whether this cluster should return group membership lookups during authentication using a group of security groups. + "securityGroup": "A String", # The name of the security group-of-groups to be used. Only relevant if enabled = true. + }, "desiredAutopilot": { # Autopilot is the configuration for Autopilot settings on the cluster. # The desired Autopilot configuration for the cluster. "enabled": True or False, # Enable Autopilot }, diff --git a/docs/dyn/dataflow_v1b3.projects.jobs.messages.html b/docs/dyn/dataflow_v1b3.projects.jobs.messages.html index 43aa3c41d77..e3c1807a527 100644 --- a/docs/dyn/dataflow_v1b3.projects.jobs.messages.html +++ b/docs/dyn/dataflow_v1b3.projects.jobs.messages.html @@ -134,7 +134,7 @@


"eventType": "A String", # The type of autoscaling event to report. "targetNumWorkers": "A String", # The target number of workers the worker pool wants to resize to use. "time": "A String", # The time this event was emitted to indicate a new target or current num_workers value. - "workerPool": "A String", # A short and friendly name for the worker pool this event refers to, populated from the value of PoolStageRelation::user_pool_name. + "workerPool": "A String", # A short and friendly name for the worker pool this event refers to. }, ], "jobMessages": [ # Messages in ascending timestamp order. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.jobs.messages.html b/docs/dyn/dataflow_v1b3.projects.locations.jobs.messages.html index f6791539343..a010ba6ee17 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.jobs.messages.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.jobs.messages.html @@ -134,7 +134,7 @@


"eventType": "A String", # The type of autoscaling event to report. "targetNumWorkers": "A String", # The target number of workers the worker pool wants to resize to use. "time": "A String", # The time this event was emitted to indicate a new target or current num_workers value. - "workerPool": "A String", # A short and friendly name for the worker pool this event refers to, populated from the value of PoolStageRelation::user_pool_name. + "workerPool": "A String", # A short and friendly name for the worker pool this event refers to. }, ], "jobMessages": [ # Messages in ascending timestamp order. diff --git a/docs/dyn/datafusion_v1.projects.locations.html b/docs/dyn/datafusion_v1.projects.locations.html index b79aa551bf6..6c1f190441d 100644 --- a/docs/dyn/datafusion_v1.projects.locations.html +++ b/docs/dyn/datafusion_v1.projects.locations.html @@ -142,7 +142,7 @@


name: string, The resource that owns the locations collection, if applicable. (required) filter: string, A filter to narrow down results to a preferred subset. The filtering language accepts strings like "displayName=tokyo", and is documented in more detail in [AIP-160](https://google.aip.dev/160). includeUnrevealedLocations: boolean, If true, the returned list will include locations which are not yet revealed. - pageSize: integer, The maximum number of results to return. If not set, the service will select a default. + pageSize: integer, The maximum number of results to return. If not set, the service selects a default. pageToken: string, A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page. x__xgafv: string, V1 error format. Allowed values diff --git a/docs/dyn/datafusion_v1.projects.locations.instances.html b/docs/dyn/datafusion_v1.projects.locations.instances.html index 89b8cbe7994..adfbb0aac30 100644 --- a/docs/dyn/datafusion_v1.projects.locations.instances.html +++ b/docs/dyn/datafusion_v1.projects.locations.instances.html @@ -140,6 +140,9 @@
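The pageSize rewording ("the service selects a default") is purely editorial; the parameter remains optional. A minimal sketch with a placeholder project:

from googleapiclient.discovery import build

service = build('datafusion', 'v1')

# pageSize is optional; when unset, the service selects a default.
response = service.projects().locations().list(
    name='projects/my-project', pageSize=10).execute()
for location in response.get('locations', []):
    print(location['locationId'])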


}, ], "createTime": "A String", # Output only. The time the instance was created. + "cryptoKeyConfig": { # The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. # The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature. + "keyReference": "A String", # The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + }, "dataprocServiceAccount": "A String", # User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. "description": "A String", # A description of this instance. "displayName": "A String", # Display name for an instance. @@ -147,7 +150,7 @@


"enableStackdriverLogging": True or False, # Option to enable Stackdriver Logging. "enableStackdriverMonitoring": True or False, # Option to enable Stackdriver Monitoring. "gcsBucket": "A String", # Output only. Cloud Storage bucket generated by Data Fusion in the customer project. - "labels": { # The resource labels for instance to use to annotate any related underlying resources such as GCE VMs. The character '=' is not allowed to be used within the labels. + "labels": { # The resource labels for instance to use to annotate any related underlying resources such as Compute Engine VMs. The character '=' is not allowed to be used within the labels. "a_key": "A String", }, "name": "A String", # Output only. The name of this instance is in the form of projects/{project}/locations/{location}/instances/{instance}. @@ -268,6 +271,9 @@


}, ], "createTime": "A String", # Output only. The time the instance was created. + "cryptoKeyConfig": { # The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. # The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature. + "keyReference": "A String", # The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + }, "dataprocServiceAccount": "A String", # User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. "description": "A String", # A description of this instance. "displayName": "A String", # Display name for an instance. @@ -275,7 +281,7 @@


"enableStackdriverLogging": True or False, # Option to enable Stackdriver Logging. "enableStackdriverMonitoring": True or False, # Option to enable Stackdriver Monitoring. "gcsBucket": "A String", # Output only. Cloud Storage bucket generated by Data Fusion in the customer project. - "labels": { # The resource labels for instance to use to annotate any related underlying resources such as GCE VMs. The character '=' is not allowed to be used within the labels. + "labels": { # The resource labels for instance to use to annotate any related underlying resources such as Compute Engine VMs. The character '=' is not allowed to be used within the labels. "a_key": "A String", }, "name": "A String", # Output only. The name of this instance is in the form of projects/{project}/locations/{location}/instances/{instance}. @@ -386,6 +392,9 @@

Method Details

}, ], "createTime": "A String", # Output only. The time the instance was created. + "cryptoKeyConfig": { # The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. # The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature. + "keyReference": "A String", # The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + }, "dataprocServiceAccount": "A String", # User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. "description": "A String", # A description of this instance. "displayName": "A String", # Display name for an instance. @@ -393,7 +402,7 @@

Method Details

"enableStackdriverLogging": True or False, # Option to enable Stackdriver Logging. "enableStackdriverMonitoring": True or False, # Option to enable Stackdriver Monitoring. "gcsBucket": "A String", # Output only. Cloud Storage bucket generated by Data Fusion in the customer project. - "labels": { # The resource labels for instance to use to annotate any related underlying resources such as GCE VMs. The character '=' is not allowed to be used within the labels. + "labels": { # The resource labels for instance to use to annotate any related underlying resources such as Compute Engine VMs. The character '=' is not allowed to be used within the labels. "a_key": "A String", }, "name": "A String", # Output only. The name of this instance is in the form of projects/{project}/locations/{location}/instances/{instance}. @@ -465,6 +474,9 @@

Method Details

}, ], "createTime": "A String", # Output only. The time the instance was created. + "cryptoKeyConfig": { # The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. # The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature. + "keyReference": "A String", # The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + }, "dataprocServiceAccount": "A String", # User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. "description": "A String", # A description of this instance. "displayName": "A String", # Display name for an instance. @@ -472,7 +484,7 @@

Method Details

"enableStackdriverLogging": True or False, # Option to enable Stackdriver Logging. "enableStackdriverMonitoring": True or False, # Option to enable Stackdriver Monitoring. "gcsBucket": "A String", # Output only. Cloud Storage bucket generated by Data Fusion in the customer project. - "labels": { # The resource labels for instance to use to annotate any related underlying resources such as GCE VMs. The character '=' is not allowed to be used within the labels. + "labels": { # The resource labels for instance to use to annotate any related underlying resources such as Compute Engine VMs. The character '=' is not allowed to be used within the labels. "a_key": "A String", }, "name": "A String", # Output only. The name of this instance is in the form of projects/{project}/locations/{location}/instances/{instance}. diff --git a/docs/dyn/datafusion_v1beta1.projects.locations.html b/docs/dyn/datafusion_v1beta1.projects.locations.html index d592ad58f83..f6ab20ad0be 100644 --- a/docs/dyn/datafusion_v1beta1.projects.locations.html +++ b/docs/dyn/datafusion_v1beta1.projects.locations.html @@ -145,7 +145,7 @@

Method Details

  name: string, The resource that owns the locations collection, if applicable. (required)
  filter: string, A filter to narrow down results to a preferred subset. The filtering language accepts strings like "displayName=tokyo", and is documented in more detail in [AIP-160](https://google.aip.dev/160).
  includeUnrevealedLocations: boolean, If true, the returned list will include locations which are not yet revealed.
-  pageSize: integer, The maximum number of results to return. If not set, the service will select a default.
+  pageSize: integer, The maximum number of results to return. If not set, the service selects a default.
  pageToken: string, A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.
  x__xgafv: string, V1 error format.
    Allowed values
diff --git a/docs/dyn/datafusion_v1beta1.projects.locations.instances.dnsPeerings.html b/docs/dyn/datafusion_v1beta1.projects.locations.instances.dnsPeerings.html
new file mode 100644
index 00000000000..df552c598f6
--- /dev/null
+++ b/docs/dyn/datafusion_v1beta1.projects.locations.instances.dnsPeerings.html
@@ -0,0 +1,198 @@

Cloud Data Fusion API . projects . locations . instances . dnsPeerings

+

Instance Methods

+

+ add(parent, body=None, x__xgafv=None)

+

Adds a DNS peering on the given resource.

+

+ close()

+

Close httplib2 connections.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists DNS peerings for a given resource.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

+ remove(parent, body=None, x__xgafv=None)

+

Removes a DNS peering on the given resource.

+

Method Details

+
+ add(parent, body=None, x__xgafv=None) +
Adds a DNS peering on the given resource.
+
+Args:
+  parent: string, The resource on which DNS peering will be created. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message to create a DNS peering.
+  "dnsPeering": { # DNS peering configuration. These configurations are used to create DNS peering with the customer's Cloud DNS. # The DNS peering config.
+    "description": "A String", # Optional. Description of the DNS zone.
+    "domain": "A String", # Required. The DNS domain name.
+    "targetNetwork": "A String", # Optional. Target network to which the DNS peering should happen.
+    "targetProject": "A String", # Optional. Target project to which the DNS peering should happen.
+    "zone": "A String", # Required. Name of the zone.
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for the set DNS peering method.
+}
+
+ +
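As a usage sketch (not part of the generated reference): the add call above maps onto the google-api-python-client as follows. All project, location, instance, and peering names here are hypothetical placeholders.

  from googleapiclient import discovery

  # Assumes Application Default Credentials are configured in the environment.
  datafusion = discovery.build('datafusion', 'v1beta1')

  # Hypothetical resource name for illustration.
  parent = 'projects/my-project/locations/us-central1/instances/my-instance'

  body = {
      'dnsPeering': {
          'domain': 'example.internal.',      # Required. The DNS domain name.
          'zone': 'example-zone',             # Required. Name of the zone.
          'targetProject': 'my-vpc-project',  # Optional.
          'targetNetwork': 'my-shared-vpc',   # Optional.
      },
  }
  response = datafusion.projects().locations().instances().dnsPeerings().add(
      parent=parent, body=body).execute()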
+ close() +
Close httplib2 connections.
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists DNS peerings for a given resource.
+
+Args:
+  parent: string, Required. The resource on which DNS peerings will be listed. (required)
+  pageSize: integer, The maximum number of items to return.
+  pageToken: string, The next_page_token value to use if there are additional results to retrieve for this list request.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # List DNS peerings response.
+  "dnsPeerings": [ # List of DNS peering configs.
+    { # DNS peering configuration. These configurations are used to create DNS peering with the customer's Cloud DNS.
+      "description": "A String", # Optional. Description of the DNS zone.
+      "domain": "A String", # Required. The DNS domain name.
+      "targetNetwork": "A String", # Optional. Target network to which the DNS peering should happen.
+      "targetProject": "A String", # Optional. Target project to which the DNS peering should happen.
+      "zone": "A String", # Required. Name of the zone.
+    },
+  ],
+  "nextPageToken": "A String", # Token to retrieve the next page of results or empty if there are no more results in the list.
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
+ +
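As a usage sketch, list() and list_next() compose into this client library's standard pagination loop; the client and parent are the same hypothetical placeholders as in the add() example above.

  request = datafusion.projects().locations().instances().dnsPeerings().list(
      parent=parent, pageSize=50)
  while request is not None:
      response = request.execute()
      for peering in response.get('dnsPeerings', []):
          print(peering['zone'], peering.get('domain'))
      # list_next() returns None once there are no further pages.
      request = datafusion.projects().locations().instances().dnsPeerings().list_next(
          previous_request=request, previous_response=response)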
+ remove(parent, body=None, x__xgafv=None) +
Removes a DNS peering on the given resource.
+
+Args:
+  parent: string, The resource on which DNS peering will be removed. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message to remove a DNS peering.
+  "zone": "A String", # Required. The zone to be removed.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for the set DNS peering method.
+}
+
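A matching removal sketch, reusing the hypothetical client and parent from the examples above; the zone must name an existing peering.

  datafusion.projects().locations().instances().dnsPeerings().remove(
      parent=parent, body={'zone': 'example-zone'}).execute()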
+
+
\ No newline at end of file
diff --git a/docs/dyn/datafusion_v1beta1.projects.locations.instances.html b/docs/dyn/datafusion_v1beta1.projects.locations.instances.html
index 508fc80cc67..c307fce4aa4 100644
--- a/docs/dyn/datafusion_v1beta1.projects.locations.instances.html
+++ b/docs/dyn/datafusion_v1beta1.projects.locations.instances.html
@@ -74,6 +74,11 @@

Cloud Data Fusion API . projects . locations . instances

Instance Methods

+

+ dnsPeerings() +

+

Returns the dnsPeerings Resource.

+

namespaces()

@@ -147,6 +152,9 @@

Method Details

}, ], "createTime": "A String", # Output only. The time the instance was created. + "cryptoKeyConfig": { # The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. # The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature. + "keyReference": "A String", # The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + }, "dataprocServiceAccount": "A String", # User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. "description": "A String", # A description of this instance. "displayName": "A String", # Display name for an instance. @@ -274,6 +282,9 @@

Method Details

}, ], "createTime": "A String", # Output only. The time the instance was created. + "cryptoKeyConfig": { # The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. # The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature. + "keyReference": "A String", # The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + }, "dataprocServiceAccount": "A String", # User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. "description": "A String", # A description of this instance. "displayName": "A String", # Display name for an instance. @@ -391,6 +402,9 @@

Method Details

}, ], "createTime": "A String", # Output only. The time the instance was created. + "cryptoKeyConfig": { # The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. # The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature. + "keyReference": "A String", # The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + }, "dataprocServiceAccount": "A String", # User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. "description": "A String", # A description of this instance. "displayName": "A String", # Display name for an instance. @@ -469,6 +483,9 @@

Method Details

}, ], "createTime": "A String", # Output only. The time the instance was created. + "cryptoKeyConfig": { # The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature. # The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature. + "keyReference": "A String", # The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + }, "dataprocServiceAccount": "A String", # User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources. "description": "A String", # A description of this instance. "displayName": "A String", # Display name for an instance. diff --git a/docs/dyn/datafusion_v1beta1.projects.locations.instances.namespaces.html b/docs/dyn/datafusion_v1beta1.projects.locations.instances.namespaces.html index 45aa4f8d8e8..2d9cd9121d2 100644 --- a/docs/dyn/datafusion_v1beta1.projects.locations.instances.namespaces.html +++ b/docs/dyn/datafusion_v1beta1.projects.locations.instances.namespaces.html @@ -80,6 +80,12 @@

Instance Methods

getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

+

+ list(parent, pageSize=None, pageToken=None, view=None, x__xgafv=None)

+

Lists namespaces in a given instance.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

setIamPolicy(resource, body=None, x__xgafv=None)

Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.

@@ -140,6 +146,93 @@

Method Details

}
+
+ list(parent, pageSize=None, pageToken=None, view=None, x__xgafv=None) +
Lists namespaces in a given instance.
+
+Args:
+  parent: string, Required. The instance whose namespaces are to be listed. (required)
+  pageSize: integer, The maximum number of items to return.
+  pageToken: string, The next_page_token value to use if there are additional results to retrieve for this list request.
+  view: string, By default, only basic information about a namespace is returned (e.g. its name). When `NAMESPACE_VIEW_FULL` is specified, additional information associated with a namespace is returned (e.g. the IAM policy set on the namespace).
+    Allowed values
+      NAMESPACE_VIEW_UNSPECIFIED - Default/unset value, which will use BASIC view.
+      NAMESPACE_VIEW_BASIC - Shows the most basic metadata of a namespace.
+      NAMESPACE_VIEW_FULL - Returns all metadata of a namespace.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # List namespaces response.
+  "namespaces": [ # List of namespaces
+    { # Represents the information of a namespace
+      "iamPolicy": { # IAMPolicy encapsulates the IAM policy name, definition and status of policy fetching. # IAM policy associated with this namespace.
+        "policy": { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). # Policy definition if IAM policy fetching is successful, otherwise empty.
+          "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+            { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.
+              "auditLogConfigs": [ # The configuration for logging of each type of permission.
+                { # Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.
+                  "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
+                    "A String",
+                  ],
+                  "logType": "A String", # The log type that this config enables.
+                },
+              ],
+              "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
+            },
+          ],
+          "bindings": [ # Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.
+            { # Associates `members` with a `role`.
+              "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+                "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
+                "expression": "A String", # Textual representation of an expression in Common Expression Language syntax.
+                "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+                "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
+              },
+              "members": [ # Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
+                "A String",
+              ],
+              "role": "A String", # Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+            },
+          ],
+          "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
+          "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+        },
+        "status": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Status of iam policy fetching.
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+        },
+      },
+      "name": "A String", # Name of the given namespace.
+    },
+  ],
+  "nextPageToken": "A String", # Token to retrieve the next page of results or empty if there are no more results in the list.
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
+
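A usage sketch for the new list() method above, with a hypothetical instance name; requesting NAMESPACE_VIEW_FULL also pulls back each namespace's IAM policy.

  parent = 'projects/my-project/locations/us-central1/instances/my-instance'
  response = datafusion.projects().locations().instances().namespaces().list(
      parent=parent, view='NAMESPACE_VIEW_FULL').execute()
  for namespace in response.get('namespaces', []):
      iam = namespace.get('iamPolicy', {})
      print(namespace['name'], iam.get('status', {}).get('code'))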
setIamPolicy(resource, body=None, x__xgafv=None)
Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
diff --git a/docs/dyn/dataproc_v1.projects.regions.clusters.html b/docs/dyn/dataproc_v1.projects.regions.clusters.html
index 82bb30fe735..99d21e306ea 100644
--- a/docs/dyn/dataproc_v1.projects.regions.clusters.html
+++ b/docs/dyn/dataproc_v1.projects.regions.clusters.html
@@ -104,6 +104,9 @@ 

Instance Methods

patch(projectId, region, clusterName, body=None, gracefulDecommissionTimeout=None, requestId=None, updateMask=None, x__xgafv=None)

Updates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). The cluster must be in a RUNNING state or an error is returned.

+

+ repair(projectId, region, clusterName, body=None, x__xgafv=None)

+

Repairs a cluster.

setIamPolicy(resource, body=None, x__xgafv=None)

Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

@@ -370,7 +373,7 @@

Method Details

  ],
}

-  requestId: string, Optional. A unique id used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
+  requestId: string, Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same ID, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format
@@ -409,7 +412,7 @@

Method Details

  region: string, Required. The Dataproc region in which to handle the request. (required)
  clusterName: string, Required. The cluster name. (required)
  clusterUuid: string, Optional. Specifying the cluster_uuid means the RPC should fail (with error NOT_FOUND) if cluster with specified UUID does not exist.
-  requestId: string, Optional. A unique id used to identify the request. If the server receives two DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
+  requestId: string, Optional. A unique ID used to identify the request. If the server receives two DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s with the same ID, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format
@@ -1351,7 +1354,7 @@

Method Details

}

  gracefulDecommissionTimeout: string, Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher.
-  requestId: string, Optional. A unique id used to identify the request. If the server receives two UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
+  requestId: string, Optional. A unique ID used to identify the request. If the server receives two UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s with the same ID, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
  updateMask: string, Required. Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: { "config":{ "workerConfig":{ "numInstances":"5" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: { "config":{ "secondaryWorkerConfig":{ "numInstances":"5" } } } *Note:* Currently, only the following fields can be updated: *Mask* *Purpose* *labels* Update labels *config.worker_config.num_instances* Resize primary worker group *config.secondary_worker_config.num_instances* Resize secondary worker group config.autoscaling_config.policy_uri Use, stop using, or change autoscaling policies
  x__xgafv: string, V1 error format.
    Allowed values
@@ -1382,6 +1385,51 @@

Method Details

}
+
+ repair(projectId, region, clusterName, body=None, x__xgafv=None) +
Repairs a cluster.
+
+Args:
+  projectId: string, Required. The ID of the Google Cloud Platform project the cluster belongs to. (required)
+  region: string, Required. The Dataproc region in which to handle the request. (required)
+  clusterName: string, Required. The cluster name. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A request to repair a cluster.
+  "clusterUuid": "A String", # Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
+  "requestId": "A String", # Optional. A unique ID used to identify the request. If the server receives two RepairClusterRequests with the same ID, the second request is ignored, and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
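A usage sketch for repair(); the identifiers are hypothetical, and the requestId follows the documented recommendation of setting it to a UUID for idempotent retries.

  import uuid

  from googleapiclient import discovery

  dataproc = discovery.build('dataproc', 'v1')
  operation = dataproc.projects().regions().clusters().repair(
      projectId='my-project',
      region='us-central1',
      clusterName='my-cluster',
      body={'requestId': str(uuid.uuid4())}).execute()
  # A long-running operation; its name ends with operations/{unique_id}.
  print(operation['name'])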
setIamPolicy(resource, body=None, x__xgafv=None)
Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.
@@ -1453,7 +1501,7 @@ 

Method Details

{ # A request to start a cluster.
  "clusterUuid": "A String", # Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
-  "requestId": "A String", # Optional. A unique id used to identify the request. If the server receives two StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
+  "requestId": "A String", # Optional. A unique ID used to identify the request. If the server receives two StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s with the same ID, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
}

  x__xgafv: string, V1 error format.
@@ -1498,7 +1546,7 @@

Method Details

{ # A request to stop a cluster.
  "clusterUuid": "A String", # Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
-  "requestId": "A String", # Optional. A unique id used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
+  "requestId": "A String", # Optional. A unique ID used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same ID, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
}

  x__xgafv: string, V1 error format.
diff --git a/docs/dyn/datastore_v1.projects.html b/docs/dyn/datastore_v1.projects.html
index 96acf9cb890..4f71a3fd441 100644
--- a/docs/dyn/datastore_v1.projects.html
+++ b/docs/dyn/datastore_v1.projects.html
@@ -248,40 +248,7 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "update": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to update. The entity must already exist. Must have a complete key path. @@ -299,40 +266,7 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "upsert": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to upsert. The entity may or may not already exist. The entity key's final path element may be incomplete. @@ -350,40 +284,7 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, }, @@ -602,40 +503,7 @@
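In these hunks the inline `Value` documentation is replaced by the `# Object with schema name: Value` cross-reference, so the field-by-field limits now live in one place. For orientation, a hypothetical `properties` map exercising the documented value types (all names illustrative):

```python
properties = {
    "title": {"stringValue": "Grocery run"},  # indexed strings: at most 1500 bytes
    "notes": {"stringValue": "long text...", "excludeFromIndexes": True},  # unindexed: up to 1,000,000 bytes
    "attempts": {"integerValue": "3"},  # 64-bit integers travel as JSON strings
    "score": {"doubleValue": 4.5},
    "done": {"booleanValue": False},
    "created": {"timestampValue": "2021-06-01T12:00:00Z"},  # stored at microsecond precision
    "where": {"geoPointValue": {"latitude": 52.52, "longitude": 13.40}},
    "tags": {"arrayValue": {"values": [
        {"stringValue": "home"},
        {"stringValue": "errand"},
    ]}},
}
```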


], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -659,40 +527,7 @@
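The `version` field documented at the end of this hunk is easiest to see in a lookup round-trip. A sketch, continuing with the `datastore` client from the earlier snippet and placeholder key values:

```python
body = {"keys": [{
    "partitionId": {"projectId": "my-project"},
    "path": [{"kind": "Task", "id": "5629499534213120"}],
}]}
resp = datastore.projects().lookup(projectId="my-project", body=body).execute()
for result in resp.get("found", []):
    print(result["version"])  # strictly positive, increases monotonically with changes
for result in resp.get("missing", []):
    print(result.get("version"))  # snapshot version used to look up the absent entity
```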


], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -790,7 +625,24 @@
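One nuance worth restating from the `partitionId` documentation repeated in these hunks: queries are scoped to a single partition, so the namespace must be chosen when the key is built. A hypothetical namespaced key:

```python
key = {
    "partitionId": {
        "projectId": "my-project",  # foreign project IDs are discouraged
        "namespaceId": "staging",   # empty string selects the default namespace
    },
    "path": [{"kind": "Task", "name": "weekly-review"}],
}
```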


"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -829,7 +681,24 @@
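This hunk expands `entityValue` inline, including the note that an embedded entity may have no key at all. A sketch of such a keyless embedded entity used as a property value (field names are illustrative):

```python
properties = {
    "address": {
        "entityValue": {
            # no "key": entities stored in Value.entity_value may be keyless
            "properties": {
                "street": {"stringValue": "123 Example Way"},
                "city": {"stringValue": "Springfield"},
            },
        },
        "excludeFromIndexes": True,  # embedded entities are commonly kept out of indexes
    },
}
```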


"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -890,7 +759,24 @@


"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -977,40 +863,7 @@


], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -1049,7 +902,24 @@
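The `arrayValue` documentation in these hunks carries two constraints that are easy to miss: a `Value` holding `array_value` must not itself set `meaning` or `exclude_from_indexes`, and element order is only preserved when every element agrees on `exclude_from_indexes`. In sketch form:

```python
properties = {
    "steps": {
        # no "excludeFromIndexes" or "meaning" here: set them per element instead
        "arrayValue": {"values": [
            {"stringValue": "wash", "excludeFromIndexes": True},
            {"stringValue": "rinse", "excludeFromIndexes": True},  # identical settings keep order
        ]},
    },
}
```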


"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. diff --git a/docs/dyn/dialogflow_v2.projects.agent.environments.html b/docs/dyn/dialogflow_v2.projects.agent.environments.html index d6512d6487d..47de8aefb06 100644 --- a/docs/dyn/dialogflow_v2.projects.agent.environments.html +++ b/docs/dyn/dialogflow_v2.projects.agent.environments.html @@ -127,7 +127,7 @@


The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -182,7 +182,7 @@
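With `agentVersion` relaxed from Required to Optional in this change, an environment can be created without pinning a version up front. A minimal sketch with google-api-python-client; the project, version, and environment IDs are placeholders:

```python
from googleapiclient.discovery import build

dialogflow = build("dialogflow", "v2")

env = {
    "agentVersion": "projects/my-project/agent/versions/4",  # optional as of this change
    "description": "Staging environment",
}
created = (
    dialogflow.projects().agent().environments()
    .create(parent="projects/my-project/agent", body=env, environmentId="staging")
    .execute()
)
```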


An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -261,7 +261,7 @@


An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -369,7 +369,7 @@


{ # The response message for Environments.ListEnvironments. "environments": [ # The list of agent environments. There will be a maximum number of items returned based on the page_size field in the request. { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -442,7 +442,7 @@
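Since `ListEnvironments` caps each page at `page_size` items, callers should page with the generated client's `list_next` helper rather than assume one response is complete. A sketch, reusing the `dialogflow` client from the create example:

```python
envs = dialogflow.projects().agent().environments()
request = envs.list(parent="projects/my-project/agent", pageSize=10)
while request is not None:
    page = request.execute()
    for environment in page.get("environments", []):
        print(environment["name"], environment.get("agentVersion", "<none>"))
    request = envs.list_next(previous_request=request, previous_response=page)
```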


The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -498,7 +498,7 @@
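For the patch form of this body, only the fields named in the update mask are written, which matters now that `agentVersion` is optional. A hypothetical description-only update with the same client:

```python
updated = (
    dialogflow.projects().agent().environments()
    .patch(
        name="projects/my-project/agent/environments/staging",
        body={"description": "Now serving production traffic"},
        updateMask="description",
    )
    .execute()
)
```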


An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. diff --git a/docs/dyn/dialogflow_v2.projects.locations.agent.environments.html b/docs/dyn/dialogflow_v2.projects.locations.agent.environments.html index 16a24cae9cf..453186bb358 100644 --- a/docs/dyn/dialogflow_v2.projects.locations.agent.environments.html +++ b/docs/dyn/dialogflow_v2.projects.locations.agent.environments.html @@ -127,7 +127,7 @@
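The same Required-to-Optional change lands in the locations-scoped resource tree below; only the parent and resource names differ. A sketch against a regional agent (region and IDs are placeholders):

```python
created = (
    dialogflow.projects().locations().agent().environments()
    .create(
        parent="projects/my-project/locations/europe-west1/agent",
        body={"agentVersion": "projects/my-project/locations/europe-west1/agent/versions/4"},
        environmentId="staging",
    )
    .execute()
)
```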


The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -182,7 +182,7 @@


An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -261,7 +261,7 @@


An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -369,7 +369,7 @@

Method Details

{ # The response message for Environments.ListEnvironments. "environments": [ # The list of agent environments. There will be a maximum number of items returned based on the page_size field in the request. { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -442,7 +442,7 @@

Method Details

The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -498,7 +498,7 @@
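To make the change above concrete, here is a minimal sketch of creating an environment through the generated Python client now that agentVersion is optional; the project ID, environment ID, and version number are placeholder assumptions, and Application Default Credentials are assumed to be configured:

    from googleapiclient.discovery import build

    # Build the Dialogflow v2 client (assumes Application Default Credentials).
    dialogflow = build("dialogflow", "v2")

    body = {
        # agentVersion is now Optional: omit this key to create the
        # environment without pinning it to a specific agent version.
        "agentVersion": "projects/my-project/agent/versions/1",
        "description": "Staging environment for pre-release testing.",
    }

    env = dialogflow.projects().agent().environments().create(
        parent="projects/my-project/agent",
        environmentId="staging",
        body=body,
    ).execute()
    print(env["name"])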

Method Details

An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. diff --git a/docs/dyn/dialogflow_v2beta1.projects.agent.environments.html b/docs/dyn/dialogflow_v2beta1.projects.agent.environments.html index 646f5f3d190..1eac35f948f 100644 --- a/docs/dyn/dialogflow_v2beta1.projects.agent.environments.html +++ b/docs/dyn/dialogflow_v2beta1.projects.agent.environments.html @@ -127,7 +127,7 @@

Method Details

The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -182,7 +182,7 @@

Method Details

An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -261,7 +261,7 @@

Method Details

An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -369,7 +369,7 @@

Method Details

{ # The response message for Environments.ListEnvironments. "environments": [ # The list of agent environments. There will be a maximum number of items returned based on the page_size field in the request. { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -442,7 +442,7 @@
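Since ListEnvironments caps each response at page_size items, callers are expected to page through results. A minimal sketch using the generated list/list_next helpers, with a placeholder project ID:

    from googleapiclient.discovery import build

    dialogflow = build("dialogflow", "v2beta1")
    envs = dialogflow.projects().agent().environments()

    # list_next returns None once there is no nextPageToken left to follow.
    request = envs.list(parent="projects/my-project/agent", pageSize=20)
    while request is not None:
        response = request.execute()
        for env in response.get("environments", []):
            # agentVersion may be absent now that the field is Optional.
            print(env["name"], env.get("agentVersion", "(no version pinned)"))
        request = envs.list_next(previous_request=request,
                                 previous_response=response)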

Method Details

The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -498,7 +498,7 @@

Method Details

An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. diff --git a/docs/dyn/dialogflow_v2beta1.projects.locations.agent.environments.html b/docs/dyn/dialogflow_v2beta1.projects.locations.agent.environments.html index 31bd1db2f69..f505c756db6 100644 --- a/docs/dyn/dialogflow_v2beta1.projects.locations.agent.environments.html +++ b/docs/dyn/dialogflow_v2beta1.projects.locations.agent.environments.html @@ -127,7 +127,7 @@

Method Details

The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -182,7 +182,7 @@

Method Details

An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -261,7 +261,7 @@

Method Details

An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -369,7 +369,7 @@

Method Details

{ # The response message for Environments.ListEnvironments. "environments": [ # The list of agent environments. There will be a maximum number of items returned based on the page_size field in the request. { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -442,7 +442,7 @@

Method Details

The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -498,7 +498,7 @@

Method Details

An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. diff --git a/docs/dyn/drive_v2.drives.html b/docs/dyn/drive_v2.drives.html index 4d66097b526..ed99df2ff87 100644 --- a/docs/dyn/drive_v2.drives.html +++ b/docs/dyn/drive_v2.drives.html @@ -330,7 +330,7 @@

Method Details

Lists the user's shared drives.
 
 Args:
-  maxResults: integer, Maximum number of shared drives to return.
+  maxResults: integer, Maximum number of shared drives to return per page.
   pageToken: string, Page token for shared drives.
   q: string, Query string for searching shared drives.
   useDomainAdminAccess: boolean, Issue the request as a domain administrator; if set to true, then all shared drives of the domain in which the requester is an administrator are returned.
diff --git a/docs/dyn/drive_v3.drives.html b/docs/dyn/drive_v3.drives.html
index 73825b853e3..2e72bf1e512 100644
--- a/docs/dyn/drive_v3.drives.html
+++ b/docs/dyn/drive_v3.drives.html
@@ -330,7 +330,7 @@ 

Method Details

Lists the user's shared drives.
 
 Args:
-  pageSize: integer, Maximum number of shared drives to return.
+  pageSize: integer, Maximum number of shared drives to return per page.
   pageToken: string, Page token for shared drives.
   q: string, Query string for searching shared drives.
   useDomainAdminAccess: boolean, Issue the request as a domain administrator; if set to true, then all shared drives of the domain in which the requester is an administrator are returned.
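The reworded parameter descriptions clarify that pageSize (maxResults in v2) bounds a single page, not the whole result set, so an exhaustive listing must follow nextPageToken. A minimal sketch against Drive v3, assuming default credentials:

    from googleapiclient.discovery import build

    drive = build("drive", "v3")

    all_drives, page_token = [], None
    while True:
        # pageSize limits this page only; nextPageToken signals more pages.
        resp = drive.drives().list(pageSize=10, pageToken=page_token).execute()
        all_drives.extend(resp.get("drives", []))
        page_token = resp.get("nextPageToken")
        if not page_token:
            break
    print(f"Found {len(all_drives)} shared drives")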
diff --git a/docs/dyn/firebaseappcheck_v1beta.projects.apps.deviceCheckConfig.html b/docs/dyn/firebaseappcheck_v1beta.projects.apps.deviceCheckConfig.html
index 2523bb76e9f..667c6e9ac98 100644
--- a/docs/dyn/firebaseappcheck_v1beta.projects.apps.deviceCheckConfig.html
+++ b/docs/dyn/firebaseappcheck_v1beta.projects.apps.deviceCheckConfig.html
@@ -104,7 +104,7 @@ 

Method Details

{ # Response message for the BatchGetDeviceCheckConfigs method. "configs": [ # DeviceCheckConfigs retrieved. - { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. + { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. It also controls certain properties of the returned App Check token, such as its ttl. Note that the Team ID registered with your app is used as part of the validation process. Please register it via the Firebase Console or programmatically via the [Firebase Management Service](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects.iosApps/patch). "keyId": "A String", # Required. The key identifier of a private key enabled with DeviceCheck, created in your Apple Developer account. "name": "A String", # Required. The relative resource name of the DeviceCheck configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/deviceCheckConfig ``` "privateKey": "A String", # Required. Input only. The contents of the private key (`.p8`) file associated with the key specified by `key_id`. For security reasons, this field will never be populated in any response. @@ -133,7 +133,7 @@

Method Details

Returns: An object of the form: - { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. + { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. It also controls certain properties of the returned App Check token, such as its ttl. Note that the Team ID registered with your app is used as part of the validation process. Please register it via the Firebase Console or programmatically via the [Firebase Management Service](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects.iosApps/patch). "keyId": "A String", # Required. The key identifier of a private key enabled with DeviceCheck, created in your Apple Developer account. "name": "A String", # Required. The relative resource name of the DeviceCheck configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/deviceCheckConfig ``` "privateKey": "A String", # Required. Input only. The contents of the private key (`.p8`) file associated with the key specified by `key_id`. For security reasons, this field will never be populated in any response. @@ -150,7 +150,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. +{ # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. It also controls certain properties of the returned App Check token, such as its ttl. Note that the Team ID registered with your app is used as part of the validation process. Please register it via the Firebase Console or programmatically via the [Firebase Management Service](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects.iosApps/patch). "keyId": "A String", # Required. The key identifier of a private key enabled with DeviceCheck, created in your Apple Developer account. "name": "A String", # Required. The relative resource name of the DeviceCheck configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/deviceCheckConfig ``` "privateKey": "A String", # Required. Input only. The contents of the private key (`.p8`) file associated with the key specified by `key_id`. For security reasons, this field will never be populated in any response. @@ -166,7 +166,7 @@
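For reference, a hedged sketch of updating the DeviceCheck configuration via the generated client; the project number, app ID, key ID, and .p8 filename below are placeholder assumptions:

    from googleapiclient.discovery import build

    appcheck = build("firebaseappcheck", "v1beta")

    name = "projects/123456/apps/1:123456:ios:abc123/deviceCheckConfig"
    body = {
        # Key identifier of a key enabled with DeviceCheck in the Apple
        # Developer account (placeholder value).
        "keyId": "ABC123DEFG",
        # Input only: the private key contents are never returned in responses.
        "privateKey": open("AuthKey_ABC123DEFG.p8").read(),
    }

    config = appcheck.projects().apps().deviceCheckConfig().patch(
        name=name,
        updateMask="keyId,privateKey",
        body=body,
    ).execute()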

Method Details

Returns: An object of the form: - { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. + { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. It also controls certain properties of the returned App Check token, such as its ttl. Note that the Team ID registered with your app is used as part of the validation process. Please register it via the Firebase Console or programmatically via the [Firebase Management Service](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects.iosApps/patch). "keyId": "A String", # Required. The key identifier of a private key enabled with DeviceCheck, created in your Apple Developer account. "name": "A String", # Required. The relative resource name of the DeviceCheck configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/deviceCheckConfig ``` "privateKey": "A String", # Required. Input only. The contents of the private key (`.p8`) file associated with the key specified by `key_id`. For security reasons, this field will never be populated in any response. diff --git a/docs/dyn/firebaseappcheck_v1beta.projects.apps.html b/docs/dyn/firebaseappcheck_v1beta.projects.apps.html index bbbdbcbe312..1ebfac46a22 100644 --- a/docs/dyn/firebaseappcheck_v1beta.projects.apps.html +++ b/docs/dyn/firebaseappcheck_v1beta.projects.apps.html @@ -174,7 +174,7 @@

Method Details

Returns: An object of the form: - { # Response message for ExchangeAppAttestAttestation + { # Response message for ExchangeAppAttestAttestation and ExchangeAppAttestDebugAttestation "artifact": "A String", # An artifact that should be passed back during the Assertion flow. "attestationToken": { # Encapsulates an *App Check token*, which is used to access Firebase services protected by App Check. # An attestation token which can be used to access Firebase APIs. "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. diff --git a/docs/dyn/firebaseappcheck_v1beta.projects.apps.recaptchaConfig.html b/docs/dyn/firebaseappcheck_v1beta.projects.apps.recaptchaConfig.html index 3b52ad55a11..b7f1a05c354 100644 --- a/docs/dyn/firebaseappcheck_v1beta.projects.apps.recaptchaConfig.html +++ b/docs/dyn/firebaseappcheck_v1beta.projects.apps.recaptchaConfig.html @@ -104,7 +104,7 @@
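The renamed response above is returned by the App Attest exchange. A rough sketch of the call shape follows; the app name and the three request payloads (produced on-device by Apple's App Attest APIs) are placeholder assumptions, and the exact byte encodings should be checked against the API reference:

    import base64
    from googleapiclient.discovery import build

    appcheck = build("firebaseappcheck", "v1beta")

    # Placeholders: these byte strings come from the device's App Attest APIs.
    attestation = b"<CBOR attestation object>"
    challenge = b"<challenge previously issued by App Check>"
    key_id = b"<App Attest key identifier>"

    resp = appcheck.projects().apps().exchangeAppAttestAttestation(
        app="projects/123456/apps/1:123456:ios:abc123",
        body={
            "attestationStatement": base64.b64encode(attestation).decode(),
            "challenge": base64.b64encode(challenge).decode(),
            "keyId": base64.b64encode(key_id).decode(),
        },
    ).execute()
    artifact = resp["artifact"]  # keep for the later assertion flow
    token = resp["attestationToken"]["attestationToken"]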

Method Details

{ # Response message for the BatchGetRecaptchaConfigs method. "configs": [ # RecaptchaConfigs retrieved. - { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. + { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. It also controls certain properties of the returned App Check token, such as its ttl. "name": "A String", # Required. The relative resource name of the reCAPTCHA v3 configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/recaptchaConfig ``` "siteSecret": "A String", # Required. Input only. The site secret used to identify your service for reCAPTCHA v3 verification. For security reasons, this field will never be populated in any response. "siteSecretSet": True or False, # Output only. Whether the `site_secret` field was previously set. Since we will never return the `site_secret` field, this field is the only way to find out whether it was previously set. @@ -132,7 +132,7 @@

Method Details

Returns: An object of the form: - { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. + { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. It also controls certain properties of the returned App Check token, such as its ttl. "name": "A String", # Required. The relative resource name of the reCAPTCHA v3 configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/recaptchaConfig ``` "siteSecret": "A String", # Required. Input only. The site secret used to identify your service for reCAPTCHA v3 verification. For security reasons, this field will never be populated in any response. "siteSecretSet": True or False, # Output only. Whether the `site_secret` field was previously set. Since we will never return the `site_secret` field, this field is the only way to find out whether it was previously set. @@ -148,7 +148,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. +{ # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. It also controls certain properties of the returned App Check token, such as its ttl. "name": "A String", # Required. The relative resource name of the reCAPTCHA v3 configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/recaptchaConfig ``` "siteSecret": "A String", # Required. Input only. The site secret used to identify your service for reCAPTCHA v3 verification. For security reasons, this field will never be populated in any response. "siteSecretSet": True or False, # Output only. Whether the `site_secret` field was previously set. Since we will never return the `site_secret` field, this field is the only way to find out whether it was previously set. @@ -163,7 +163,7 @@
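A matching hedged sketch for updating the reCAPTCHA v3 configuration; the project number, app ID, and site secret are placeholder assumptions:

    from googleapiclient.discovery import build

    appcheck = build("firebaseappcheck", "v1beta")

    name = "projects/123456/apps/1:123456:web:abc123/recaptchaConfig"
    config = appcheck.projects().apps().recaptchaConfig().patch(
        name=name,
        updateMask="siteSecret",
        body={"siteSecret": "your-recaptcha-v3-site-secret"},  # input only
    ).execute()
    # siteSecretSet is the only way to confirm a secret was stored, since
    # site_secret itself is never returned in any response.
    print(config["siteSecretSet"])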

Method Details

Returns: An object of the form: - { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. + { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. It also controls certain properties of the returned App Check token, such as its ttl. "name": "A String", # Required. The relative resource name of the reCAPTCHA v3 configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/recaptchaConfig ``` "siteSecret": "A String", # Required. Input only. The site secret used to identify your service for reCAPTCHA v3 verification. For security reasons, this field will never be populated in any response. "siteSecretSet": True or False, # Output only. Whether the `site_secret` field was previously set. Since we will never return the `site_secret` field, this field is the only way to find out whether it was previously set. diff --git a/docs/dyn/firestore_v1beta1.projects.databases.documents.html b/docs/dyn/firestore_v1beta1.projects.databases.documents.html index 352fb5224ab..712832b8a03 100644 --- a/docs/dyn/firestore_v1beta1.projects.databases.documents.html +++ b/docs/dyn/firestore_v1beta1.projects.databases.documents.html @@ -175,11 +175,7 @@

Method Details

"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -234,16 +230,31 @@

Method Details

{ # A transformation of a field of the document. "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference. "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. 
- "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -263,11 +274,7 @@

Method Details

"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -287,11 +294,7 @@

Method Details

"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -312,7 +315,26 @@

Method Details

}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -323,11 +345,7 @@

Method Details

"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -359,16 +377,31 @@

@@ -359,16 +377,31 @@ Method Details
{ # A transformation of a field of the document.
  "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value.
    "values": [ # Values in the array.
-     # Object with schema name: Value
+     { # A message that can hold any of the supported value types.
+       "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
+       "booleanValue": True or False, # A boolean value.
+       "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries.
+       "doubleValue": 3.14, # A double value.
+       "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
+         "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+         "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+       },
+       "integerValue": "A String", # An integer value.
+       "mapValue": { # A map value. # A map value.
+         "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty.
+           "a_key": # Object with schema name: Value
+         },
+       },
+       "nullValue": "A String", # A null value.
+       "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
+       "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries.
+       "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down.
+     },
    ],
  },
  "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference.
  "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer.
-   "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
-     "values": [ # Values in the array.
-       # Object with schema name: Value
-     ],
-   },
+   "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
    "booleanValue": True or False, # A boolean value.
    "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries.
    "doubleValue": 3.14, # A double value.

@@ -388,11 +421,7 @@ Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -412,11 +441,7 @@

Method Details

"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -437,7 +462,26 @@

@@ -437,7 +462,26 @@ Method Details
}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -471,11 +515,7 @@

Method Details

{ # The result of applying a write.
  "transformResults": [ # The results of applying each DocumentTransform.FieldTransform, in the same order.
    { # A message that can hold any of the supported value types.
-     "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
-       "values": [ # Values in the array.
-         # Object with schema name: Value
-       ],
-     },
+     "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
      "booleanValue": True or False, # A boolean value.
      "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries.
      "doubleValue": 3.14, # A double value.
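On the response side, each writeResults[i] matches writes[i], and its transformResults line up one-to-one with that write's fieldTransforms. A short sketch of reading them from the commit response in the earlier example:

    for write_result in response.get("writeResults", []):
        for value in write_result.get("transformResults", []):
            # Each entry is a Value; array transforms report {"nullValue": "NULL_VALUE"},
            # while increment/maximum/minimum report the field's new numeric value.
            print(value)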

@@ -563,16 +603,31 @@ Method Details
{ # A transformation of a field of the document.
  "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value.
    "values": [ # Values in the array.
-     # Object with schema name: Value
+     { # A message that can hold any of the supported value types.
+       "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
+       "booleanValue": True or False, # A boolean value.
+       "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries.
+       "doubleValue": 3.14, # A double value.
+       "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
+         "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+         "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+       },
+       "integerValue": "A String", # An integer value.
+       "mapValue": { # A map value. # A map value.
+         "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty.
+           "a_key": # Object with schema name: Value
+         },
+       },
+       "nullValue": "A String", # A null value.
+       "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
+       "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries.
+       "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down.
+     },
    ],
  },
  "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference.
  "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer.
-   "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
-     "values": [ # Values in the array.
-       # Object with schema name: Value
-     ],
-   },
+   "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
    "booleanValue": True or False, # A boolean value.
    "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries.
    "doubleValue": 3.14, # A double value.
@@ -592,11 +647,7 @@ Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -616,11 +667,7 @@

Method Details

"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -641,7 +688,26 @@

Method Details

}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -652,11 +718,7 @@

Method Details

"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -688,16 +750,31 @@

Method Details

{ # A transformation of a field of the document.
  "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value.
    "values": [ # Values in the array.
-     # Object with schema name: Value
+     { # A message that can hold any of the supported value types.
+       "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
+       "booleanValue": True or False, # A boolean value.
+       "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries.
+       "doubleValue": 3.14, # A double value.
+       "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth.
+         "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+         "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+       },
+       "integerValue": "A String", # An integer value.
+       "mapValue": { # A map value. # A map value.
+         "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty.
+           "a_key": # Object with schema name: Value
+         },
+       },
+       "nullValue": "A String", # A null value.
+       "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
+       "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries.
+       "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down.
+     },
    ],
  },
  "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference.
  "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer.
-   "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
-     "values": [ # Values in the array.
-       # Object with schema name: Value
-     ],
-   },
+   "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
    "booleanValue": True or False, # A boolean value.
    "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries.
    "doubleValue": 3.14, # A double value.
@@ -717,11 +794,7 @@ Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -741,11 +814,7 @@

Method Details

"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -766,7 +835,26 @@

Method Details

}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -790,11 +878,7 @@

Method Details

{ # The result of applying a write.
  "transformResults": [ # The results of applying each DocumentTransform.FieldTransform, in the same order.
    { # A message that can hold any of the supported value types.
-     "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
-       "values": [ # Values in the array.
-         # Object with schema name: Value
-       ],
-     },
+     "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array.
      "booleanValue": True or False, # A boolean value.
      "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries.
      "doubleValue": 3.14, # A double value.
@@ -834,11 +918,7 @@ Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -876,11 +956,7 @@

@@ -876,11 +956,7 @@ Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -946,11 +1022,7 @@

Method Details

"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1003,11 +1075,7 @@

Method Details

"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1117,11 +1185,7 @@

Method Details

"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1169,11 +1233,7 @@

@@ -1169,11 +1233,7 @@ Method Details
"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1207,11 +1267,7 @@

Method Details

}, "op": "A String", # The operator to filter by. "value": { # A message that can hold any of the supported value types. # The value to compare to. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1264,11 +1320,7 @@

@@ -1264,11 +1320,7 @@ Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1354,11 +1406,7 @@

Method Details

"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1406,11 +1454,7 @@

Method Details

"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1444,11 +1488,7 @@

Method Details

}, "op": "A String", # The operator to filter by. "value": { # A message that can hold any of the supported value types. # The value to compare to. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1493,11 +1533,7 @@

@@ -1493,11 +1533,7 @@ Method Details
"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1549,11 +1585,7 @@

Method Details

"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1593,11 +1625,7 @@

Method Details

"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1671,11 +1699,7 @@

Method Details

"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1723,11 +1747,7 @@

Method Details

"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1761,11 +1781,7 @@

Method Details

}, "op": "A String", # The operator to filter by. "value": { # A message that can hold any of the supported value types. # The value to compare to. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1809,11 +1825,7 @@

Method Details

"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1870,16 +1882,31 @@

Method Details

{ # A transformation of a field of the document. "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference. "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1899,11 +1926,7 @@

Method Details

"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1923,11 +1946,7 @@

Method Details

"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1948,7 +1967,26 @@

Method Details

}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -1959,11 +1997,7 @@

Method Details

"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1995,16 +2029,31 @@

Method Details

{ # A transformation of a field of the document. "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference. "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -2024,11 +2073,7 @@

Method Details

"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -2048,11 +2093,7 @@

Method Details

"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -2073,7 +2114,26 @@

Method Details

}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -2099,11 +2159,7 @@

Method Details

{ # The result of applying a write. "transformResults": [ # The results of applying each DocumentTransform.FieldTransform, in the same order. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. diff --git a/docs/dyn/groupssettings_v1.groups.html b/docs/dyn/groupssettings_v1.groups.html index 7dd16d14eff..246b38e4313 100644 --- a/docs/dyn/groupssettings_v1.groups.html +++ b/docs/dyn/groupssettings_v1.groups.html @@ -126,6 +126,7 @@

Method Details

# - true # - false "defaultMessageDenyNotificationText": "A String", # When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true. + "default_sender": "A String", # Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group "description": "A String", # Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters. "email": "A String", # The group's email address. This property can be updated using the Directory API. Note: Only a group owner can change a group's email address. A group manager can't do this. # When you change your group's address using the Directory API or the control panel, you are changing the address your subscribers use to send email and the web address people use to access your group. People can't reach your group by visiting the old address. @@ -402,6 +403,7 @@
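The new default_sender property is written through the same groups resource as the other settings on this page. A minimal sketch with the dynamic client; the group address is a placeholder, and domain-admin credentials with the apps.groups.settings scope are assumed:

  from googleapiclient.discovery import build

  groups = build("groupssettings", "v1")
  updated = groups.groups().patch(
      groupUniqueId="my-group@example.com",  # placeholder group address
      body={"default_sender": "GROUP"},      # or "DEFAULT_SELF"
  ).execute()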

Method Details

# - true # - false "defaultMessageDenyNotificationText": "A String", # When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true. + "default_sender": "A String", # Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group "description": "A String", # Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters. "email": "A String", # The group's email address. This property can be updated using the Directory API. Note: Only a group owner can change a group's email address. A group manager can't do this. # When you change your group's address using the Directory API or the control panel, you are changing the address your subscribers use to send email and the web address people use to access your group. People can't reach your group by visiting the old address. @@ -672,6 +674,7 @@

Method Details

# - true # - false "defaultMessageDenyNotificationText": "A String", # When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true. + "default_sender": "A String", # Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group "description": "A String", # Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters. "email": "A String", # The group's email address. This property can be updated using the Directory API. Note: Only a group owner can change a group's email address. A group manager can't do this. # When you change your group's address using the Directory API or the control panel, you are changing the address your subscribers use to send email and the web address people use to access your group. People can't reach your group by visiting the old address. @@ -948,6 +951,7 @@

Method Details

# - true # - false "defaultMessageDenyNotificationText": "A String", # When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true. + "default_sender": "A String", # Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group "description": "A String", # Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters. "email": "A String", # The group's email address. This property can be updated using the Directory API. Note: Only a group owner can change a group's email address. A group manager can't do this. # When you change your group's address using the Directory API or the control panel, you are changing the address your subscribers use to send email and the web address people use to access your group. People can't reach your group by visiting the old address. @@ -1218,6 +1222,7 @@

Method Details

# - true # - false "defaultMessageDenyNotificationText": "A String", # When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true. + "default_sender": "A String", # Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group "description": "A String", # Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters. "email": "A String", # The group's email address. This property can be updated using the Directory API. Note: Only a group owner can change a group's email address. A group manager can't do this. # When you change your group's address using the Directory API or the control panel, you are changing the address your subscribers use to send email and the web address people use to access your group. People can't reach your group by visiting the old address. diff --git a/docs/dyn/index.md b/docs/dyn/index.md index 8c18b630523..c8a6c4c1616 100644 --- a/docs/dyn/index.md +++ b/docs/dyn/index.md @@ -120,6 +120,7 @@ ## baremetalsolution * [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/baremetalsolution_v1.html) +* [v1alpha1](http://googleapis.github.io/google-api-python-client/docs/dyn/baremetalsolution_v1alpha1.html) ## bigquery @@ -881,6 +882,7 @@ ## sqladmin +* [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/sqladmin_v1.html) * [v1beta4](http://googleapis.github.io/google-api-python-client/docs/dyn/sqladmin_v1beta4.html) @@ -952,6 +954,7 @@ ## verifiedaccess * [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/verifiedaccess_v1.html) +* [v2](http://googleapis.github.io/google-api-python-client/docs/dyn/verifiedaccess_v2.html) ## videointelligence diff --git a/docs/dyn/logging_v2.billingAccounts.html b/docs/dyn/logging_v2.billingAccounts.html index e5a75f7bc7e..0bd568dcb61 100644 --- a/docs/dyn/logging_v2.billingAccounts.html +++ b/docs/dyn/logging_v2.billingAccounts.html @@ -94,6 +94,11 @@

Instance Methods

Returns the logs Resource.

+

+ operations() +

+

Returns the operations Resource.

+

sinks()

diff --git a/docs/dyn/logging_v2.billingAccounts.locations.html b/docs/dyn/logging_v2.billingAccounts.locations.html index 4e276e6848c..dc52ed4bcb4 100644 --- a/docs/dyn/logging_v2.billingAccounts.locations.html +++ b/docs/dyn/logging_v2.billingAccounts.locations.html @@ -79,6 +79,11 @@

Instance Methods

Returns the buckets Resource.

+

+ operations() +

+

Returns the operations Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/logging_v2.billingAccounts.locations.operations.html b/docs/dyn/logging_v2.billingAccounts.locations.operations.html new file mode 100644 index 00000000000..35fbc9fc5fa --- /dev/null +++ b/docs/dyn/logging_v2.billingAccounts.locations.operations.html @@ -0,0 +1,176 @@ + + + +

Cloud Logging API . billingAccounts . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.

+

+ close()

+

Close httplib2 connections.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id; however, overriding users must ensure the name binding is the parent resource, without the operations collection id.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is an empty JSON object {}.
+}
+
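Cancellation is best-effort, so a caller normally issues cancel and then observes the outcome through get or list. A short sketch; the billing account and operation IDs are placeholders:

  from googleapiclient.discovery import build

  service = build("logging", "v2")
  service.billingAccounts().locations().operations().cancel(
      name="billingAccounts/012345-6789AB-CDEF01/locations/global/operations/my-op",
      body={},
  ).execute()  # returns Empty ({}) whether or not the operation ultimately stops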
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id; however, overriding users must ensure the name binding is the parent resource, without the operations collection id.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+      "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+      "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
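list and list_next pair up in the usual dynamic-client pagination idiom. A sketch with a placeholder parent name:

  from googleapiclient.discovery import build

  service = build("logging", "v2")
  ops = service.billingAccounts().locations().operations()
  request = ops.list(name="billingAccounts/012345-6789AB-CDEF01/locations/global")
  while request is not None:
      response = request.execute()
      for op in response.get("operations", []):
          print(op["name"], op.get("done", False))
      request = ops.list_next(previous_request=request, previous_response=response)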
+ + \ No newline at end of file diff --git a/docs/dyn/logging_v2.billingAccounts.operations.html b/docs/dyn/logging_v2.billingAccounts.operations.html new file mode 100644 index 00000000000..310bf78a836 --- /dev/null +++ b/docs/dyn/logging_v2.billingAccounts.operations.html @@ -0,0 +1,124 @@ + + + +

Cloud Logging API . billingAccounts . operations

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
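A typical way to consume this get method is a poll loop along the following lines; the operation name and sleep interval are assumptions, not service recommendations:

  import time
  from googleapiclient.discovery import build

  service = build("logging", "v2")
  name = "billingAccounts/012345-6789AB-CDEF01/operations/my-op"  # placeholder
  op = service.billingAccounts().operations().get(name=name).execute()
  while not op.get("done"):
      time.sleep(10)  # arbitrary interval; back off as appropriate
      op = service.billingAccounts().operations().get(name=name).execute()
  if "error" in op:
      raise RuntimeError(op["error"].get("message", "operation failed"))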
+ + \ No newline at end of file diff --git a/docs/dyn/logging_v2.entries.html b/docs/dyn/logging_v2.entries.html index 251d06dd5b8..eca163df2e0 100644 --- a/docs/dyn/logging_v2.entries.html +++ b/docs/dyn/logging_v2.entries.html @@ -77,6 +77,9 @@

Instance Methods

close()

Close httplib2 connections.

+

+ copy(body=None, x__xgafv=None)

+

Copies a set of log entries from a logging bucket to a Cloud Storage bucket.

list(body=None, x__xgafv=None)

Lists log entries. Use this method to retrieve log entries that originated from a project/folder/organization/billing account. For ways to export log entries, see Exporting Logs (https://cloud.google.com/logging/docs/export).

@@ -95,6 +98,49 @@

Method Details

Close httplib2 connections.
+
+ copy(body=None, x__xgafv=None) +
Copies a set of log entries from a logging bucket to a Cloud Storage bucket.
+
+Args:
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The parameters to CopyLogEntries.
+  "destination": "A String", # Required. Destination to which to copy logs.
+  "filter": "A String", # Optional. A filter specifying which log entries to copy. The filter must be no more than 20k characters. An empty filter matches all log entries.
+  "name": "A String", # Required. Bucket from which to copy logs. e.g. "projects/my-project/locations/my-location/buckets/my-source-bucket
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
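Because copy returns a long-running Operation, the returned name is what a caller polls afterwards (for example with the operations.get methods added elsewhere in this change). A minimal sketch; the bucket names and filter are placeholders:

  from googleapiclient.discovery import build

  service = build("logging", "v2")
  op = service.entries().copy(
      body={
          "name": "projects/my-project/locations/global/buckets/my-source-bucket",
          "destination": "storage.googleapis.com/my-destination-bucket",  # assumed Cloud Storage form
          "filter": 'timestamp >= "2021-01-01T00:00:00Z"',  # optional; empty matches all entries
      }
  ).execute()
  print(op["name"])  # poll until the operation reports done: true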
list(body=None, x__xgafv=None)
Lists log entries. Use this method to retrieve log entries that originated from a project/folder/organization/billing account. For ways to export log entries, see Exporting Logs (https://cloud.google.com/logging/docs/export).
diff --git a/docs/dyn/logging_v2.folders.locations.html b/docs/dyn/logging_v2.folders.locations.html
index d94bd359d08..a9072c1fe84 100644
--- a/docs/dyn/logging_v2.folders.locations.html
+++ b/docs/dyn/logging_v2.folders.locations.html
@@ -79,6 +79,11 @@ 

Instance Methods

Returns the buckets Resource.

+

+ operations() +

+

Returns the operations Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/logging_v2.folders.locations.operations.html b/docs/dyn/logging_v2.folders.locations.operations.html new file mode 100644 index 00000000000..6751d73a973 --- /dev/null +++ b/docs/dyn/logging_v2.folders.locations.operations.html @@ -0,0 +1,214 @@ + + + +

Cloud Logging API . folders . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id; however, overriding users must ensure the name binding is the parent resource, without the operations collection id.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is an empty JSON object {}.
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id; however, overriding users must ensure the name binding is the parent resource, without the operations collection id.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+      "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+      "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
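Every method on this page traffics in the same Operation shape, so a short sketch of how a caller typically interprets one of these dicts may help; the helper name is ours, and the field handling mirrors the schema above:

    def summarize_operation(op):
        """Summarize an Operation dict as returned by get() or list()."""
        if not op.get("done"):
            return "{}: still in progress".format(op["name"])
        if "error" in op:
            # On failure, "error" is a google.rpc.Status: code, message, details.
            err = op["error"]
            return "{}: failed with code {}: {}".format(
                op["name"], err.get("code"), err.get("message", ""))
        # On success, "response" is google.protobuf.Empty or the resulting
        # resource; its "@type" field carries the type URL.
        rtype = op.get("response", {}).get("@type", "google.protobuf.Empty")
        return "{}: succeeded ({})".format(op["name"], rtype)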
+
+
\ No newline at end of file
diff --git a/docs/dyn/logging_v2.locations.html b/docs/dyn/logging_v2.locations.html
index fbc439eab72..0ac52eef7f0 100644
--- a/docs/dyn/logging_v2.locations.html
+++ b/docs/dyn/logging_v2.locations.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the buckets Resource.

+

+ operations() +

+

+Returns the operations Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/logging_v2.locations.operations.html b/docs/dyn/logging_v2.locations.operations.html
new file mode 100644
index 00000000000..e25174e2d97
--- /dev/null
+++ b/docs/dyn/logging_v2.locations.operations.html
@@ -0,0 +1,214 @@
+
+
+
+

Cloud Logging API . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id; however, overriding users must ensure the name binding is the parent resource, without the operations collection id.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is an empty JSON object {}.
+}
+
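As a usage sketch (all IDs below are placeholders, not values from this page): cancellation is best-effort and the operation is not deleted, so a follow-up get() should eventually show an error with code 1 (Code.CANCELLED):

    from googleapiclient.discovery import build

    service = build("logging", "v2")  # assumes application-default credentials
    ops = service.locations().operations()

    # Placeholder operation name; a real one comes from an earlier API response.
    op_name = "projects/my-project/locations/global/operations/my-operation-id"

    ops.cancel(name=op_name, body={}).execute()  # body is the empty CancelOperation message

    op = ops.get(name=op_name).execute()
    if op.get("done") and op.get("error", {}).get("code") == 1:
        print("Operation was cancelled (google.rpc.Code.CANCELLED)")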
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
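The page only says to poll "at intervals as recommended by the API service", so the interval below is an illustrative choice; a simple polling loop over get() might look like:

    import time

    def wait_for_operation(ops, name, poll_seconds=5, timeout_seconds=300):
        """Poll operations.get() until done; return the final Operation dict."""
        deadline = time.monotonic() + timeout_seconds
        while time.monotonic() < deadline:
            op = ops.get(name=name).execute()
            if op.get("done"):
                return op
            time.sleep(poll_seconds)
        raise TimeoutError("operation %s still running after %ds"
                           % (name, timeout_seconds))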
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
+Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id; however, overriding users must ensure the name binding is the parent resource, without the operations collection id.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+      "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+      "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
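list() and list_next() follow this client library's standard paging convention; a minimal loop (the parent resource is a placeholder) looks like:

    from googleapiclient.discovery import build

    service = build("logging", "v2")
    ops = service.locations().operations()

    request = ops.list(name="projects/my-project/locations/global", pageSize=100)
    while request is not None:
        response = request.execute()
        for op in response.get("operations", []):
            print(op["name"], "done" if op.get("done") else "running")
        # list_next() returns None once the last page has been fetched.
        request = ops.list_next(previous_request=request,
                                previous_response=response)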
+
+
\ No newline at end of file
diff --git a/docs/dyn/logging_v2.organizations.locations.html b/docs/dyn/logging_v2.organizations.locations.html
index ced3a5b26fa..f07b2a55453 100644
--- a/docs/dyn/logging_v2.organizations.locations.html
+++ b/docs/dyn/logging_v2.organizations.locations.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the buckets Resource.

+

+ operations() +

+

+Returns the operations Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/logging_v2.organizations.locations.operations.html b/docs/dyn/logging_v2.organizations.locations.operations.html
new file mode 100644
index 00000000000..1ffaa2c9328
--- /dev/null
+++ b/docs/dyn/logging_v2.organizations.locations.operations.html
@@ -0,0 +1,214 @@
+
+
+
+

Cloud Logging API . organizations . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id; however, overriding users must ensure the name binding is the parent resource, without the operations collection id.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is an empty JSON object {}.
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
+Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id; however, overriding users must ensure the name binding is the parent resource, without the operations collection id.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+      "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+      "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
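The descriptions above repeatedly note that a server may not support these methods and will return UNIMPLEMENTED; with this client that typically surfaces as an HttpError carrying HTTP status 501, so a defensive caller can branch on it (the organization ID below is a placeholder):

    from googleapiclient.discovery import build
    from googleapiclient.errors import HttpError

    service = build("logging", "v2")
    ops = service.organizations().locations().operations()

    try:
        ops.list(name="organizations/123456789/locations/global").execute()
    except HttpError as e:
        if e.resp.status == 501:
            # google.rpc.Code.UNIMPLEMENTED maps to HTTP 501 Not Implemented.
            print("This server does not support Operations.ListOperations")
        else:
            raise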
+
+
\ No newline at end of file
diff --git a/docs/dyn/logging_v2.projects.locations.html b/docs/dyn/logging_v2.projects.locations.html
index ee7fbd93a52..37033159ba2 100644
--- a/docs/dyn/logging_v2.projects.locations.html
+++ b/docs/dyn/logging_v2.projects.locations.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the buckets Resource.

+

+ operations() +

+

+Returns the operations Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/logging_v2.projects.locations.operations.html b/docs/dyn/logging_v2.projects.locations.operations.html
new file mode 100644
index 00000000000..05bd20ba0d2
--- /dev/null
+++ b/docs/dyn/logging_v2.projects.locations.operations.html
@@ -0,0 +1,214 @@
+
+
+
+

Cloud Logging API . projects . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id; however, overriding users must ensure the name binding is the parent resource, without the operations collection id.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is an empty JSON object {}.
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
+Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id; however, overriding users must ensure the name binding is the parent resource, without the operations collection id.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+      "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+      "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
+
+
\ No newline at end of file
diff --git a/docs/dyn/monitoring_v1.projects.dashboards.html b/docs/dyn/monitoring_v1.projects.dashboards.html
index 7146c53c1fe..5456b9020fe 100644
--- a/docs/dyn/monitoring_v1.projects.dashboards.html
+++ b/docs/dyn/monitoring_v1.projects.dashboards.html
@@ -117,6 +117,9 @@ Method Details

Method Details

"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -328,6 +331,9 @@

Method Details

"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -537,6 +543,9 @@

Method Details

{ # A single tile in the mosaic. The placement and size of the tile are configurable. "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1. "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -751,6 +760,9 @@

Method Details

"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -974,6 +986,9 @@

Method Details

"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -1185,6 +1200,9 @@

Method Details

"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -1394,6 +1412,9 @@

Method Details

{ # A single tile in the mosaic. The placement and size of the tile are configurable. "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1. "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -1608,6 +1629,9 @@

Method Details

"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -1855,6 +1879,9 @@

Method Details

"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -2066,6 +2093,9 @@

Method Details

"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -2275,6 +2305,9 @@

Method Details

{ # A single tile in the mosaic. The placement and size of the tile are configurable. "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1. "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -2489,6 +2522,9 @@

Method Details

"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -2722,6 +2758,9 @@

Method Details

"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -2933,6 +2972,9 @@

Method Details

"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -3142,6 +3184,9 @@

Method Details

{ # A single tile in the mosaic. The placement and size of the tile are configurable. "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1. "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -3356,6 +3401,9 @@

Method Details

"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -3597,6 +3645,9 @@

Method Details

"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -3808,6 +3859,9 @@

Method Details

"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -4017,6 +4071,9 @@

Method Details

       { # A single tile in the mosaic. The placement and size of the tile are configurable.
         "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1.
         "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart.
+          "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data.
+            "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
+          },
           "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space.
           },
           "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data.
@@ -4231,6 +4288,9 @@ Method Details
"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -4454,6 +4514,9 @@

Method Details

"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -4665,6 +4728,9 @@

Method Details

"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -4874,6 +4940,9 @@

Method Details

       { # A single tile in the mosaic. The placement and size of the tile are configurable.
         "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1.
         "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart.
+          "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data.
+            "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
+          },
           "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space.
           },
           "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data.
@@ -5088,6 +5157,9 @@ Method Details
"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. diff --git a/docs/dyn/monitoring_v3.projects.alertPolicies.html b/docs/dyn/monitoring_v3.projects.alertPolicies.html index ac128989acd..78696e94800 100644 --- a/docs/dyn/monitoring_v3.projects.alertPolicies.html +++ b/docs/dyn/monitoring_v3.projects.alertPolicies.html @@ -111,6 +111,11 @@

Method Details

 The object takes the form of:

 { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/).
+  "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified.
+    "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented.
+      "period": "A String", # Not more than one notification per period.
+    },
+  },
   "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.
   "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.
     { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong.
@@ -132,6 +137,12 @@ Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. @@ -216,6 +227,11 @@

Method Details

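The two additions above work as a pair: a conditionMatchedLog condition defines a log-based alert policy, and such policies must also carry alertStrategy.notificationRateLimit. A hedged sketch of creating one via the Python client; the project, filter, extractor expression, and notification channel below are assumptions for illustration.

```python
from googleapiclient import discovery

service = discovery.build("monitoring", "v3")

policy = {
    "displayName": "Payment failures in logs",  # hypothetical
    "combiner": "OR",
    "conditions": [
        {
            "displayName": "Log match: payment failure",
            "conditionMatchedLog": {
                # Required: a logs-based filter (Advanced Logs Queries syntax).
                "filter": 'severity>=ERROR AND textPayload:"payment failed"',
                # Optional: each combination of extracted label values is
                # treated as a separate rule for triggering notifications.
                "labelExtractors": {
                    "instance": "EXTRACT(resource.labels.instance_id)",
                },
            },
        },
    ],
    # Required for LogMatch conditions: at most one notification per period.
    "alertStrategy": {
        "notificationRateLimit": {"period": "300s"},
    },
    "notificationChannels": [
        "projects/my-project/notificationChannels/1234",  # hypothetical
    ],
}

created = service.projects().alertPolicies().create(
    name="projects/my-project", body=policy).execute()
print(created["name"])
```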
 An object of the form:

 { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/).
+  "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified.
+    "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented.
+      "period": "A String", # Not more than one notification per period.
+    },
+  },
   "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.
   "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.
     { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong.
@@ -237,6 +253,12 @@ Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. @@ -346,6 +368,11 @@

Method Details

 An object of the form:

 { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/).
+  "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified.
+    "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented.
+      "period": "A String", # Not more than one notification per period.
+    },
+  },
   "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.
   "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.
     { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong.
@@ -367,6 +394,12 @@ Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. @@ -464,6 +497,11 @@

Method Details

 { # The protocol for the ListAlertPolicies response.
   "alertPolicies": [ # The returned alert policies.
     { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/).
+      "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified.
+        "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented.
+          "period": "A String", # Not more than one notification per period.
+        },
+      },
       "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.
       "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.
         { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong.
@@ -485,6 +523,12 @@ Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. @@ -589,6 +633,11 @@

Method Details

 The object takes the form of:

 { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/).
+  "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified.
+    "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented.
+      "period": "A String", # Not more than one notification per period.
+    },
+  },
   "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.
   "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.
     { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong.
@@ -610,6 +659,12 @@ Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. @@ -695,6 +750,11 @@

Method Details

 An object of the form:

 { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/).
+  "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified.
+    "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented.
+      "period": "A String", # Not more than one notification per period.
+    },
+  },
   "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.
   "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.
     { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong.
@@ -716,6 +776,12 @@ Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. diff --git a/docs/dyn/monitoring_v3.services.html b/docs/dyn/monitoring_v3.services.html index 0a1109a7cff..5f40b23183f 100644 --- a/docs/dyn/monitoring_v3.services.html +++ b/docs/dyn/monitoring_v3.services.html @@ -145,6 +145,9 @@

Method Details

"telemetry": { # Configuration for how to query telemetry on a Service. # Configuration for how to query telemetry on a Service. "resourceName": "A String", # The full name of the resource that defines this service. Formatted as described in https://cloud.google.com/apis/design/resource_names. }, + "userLabels": { # Labels which have been used to annotate the service. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value. + "a_key": "A String", + }, } serviceId: string, Optional. The Service id to use for this Service. If omitted, an id will be generated instead. Must match the pattern [a-z0-9\-]+ @@ -186,6 +189,9 @@

Method Details

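A short sketch of attaching the new userLabels map when creating a Service; the parent, serviceId, and label values are assumptions. Keys must start with a letter, and keys and values are capped at 63 characters each.

```python
from googleapiclient import discovery

service = discovery.build("monitoring", "v3")

body = {
    "displayName": "Checkout",  # hypothetical
    "custom": {},               # a bare custom service
    "userLabels": {
        "team": "payments",
        "env": "prod",
    },
}

created = service.services().create(
    parent="projects/my-project",  # hypothetical
    serviceId="checkout",          # must match the pattern [a-z0-9\-]+
    body=body,
).execute()
print(created["name"], created.get("userLabels"))
```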
"telemetry": { # Configuration for how to query telemetry on a Service. # Configuration for how to query telemetry on a Service. "resourceName": "A String", # The full name of the resource that defines this service. Formatted as described in https://cloud.google.com/apis/design/resource_names. }, + "userLabels": { # Labels which have been used to annotate the service. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value. + "a_key": "A String", + }, }
@@ -251,6 +257,9 @@

Method Details

"telemetry": { # Configuration for how to query telemetry on a Service. # Configuration for how to query telemetry on a Service. "resourceName": "A String", # The full name of the resource that defines this service. Formatted as described in https://cloud.google.com/apis/design/resource_names. }, + "userLabels": { # Labels which have been used to annotate the service. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value. + "a_key": "A String", + }, } @@ -304,6 +313,9 @@

Method Details

"telemetry": { # Configuration for how to query telemetry on a Service. # Configuration for how to query telemetry on a Service. "resourceName": "A String", # The full name of the resource that defines this service. Formatted as described in https://cloud.google.com/apis/design/resource_names. }, + "userLabels": { # Labels which have been used to annotate the service. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value. + "a_key": "A String", + }, }, ], } @@ -362,6 +374,9 @@

Method Details

"telemetry": { # Configuration for how to query telemetry on a Service. # Configuration for how to query telemetry on a Service. "resourceName": "A String", # The full name of the resource that defines this service. Formatted as described in https://cloud.google.com/apis/design/resource_names. }, + "userLabels": { # Labels which have been used to annotate the service. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value. + "a_key": "A String", + }, } updateMask: string, A set of field paths defining which fields to use for the update. @@ -403,6 +418,9 @@

Method Details

"telemetry": { # Configuration for how to query telemetry on a Service. # Configuration for how to query telemetry on a Service. "resourceName": "A String", # The full name of the resource that defines this service. Formatted as described in https://cloud.google.com/apis/design/resource_names. }, + "userLabels": { # Labels which have been used to annotate the service. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value. + "a_key": "A String", + }, } diff --git a/docs/dyn/paymentsresellersubscription_v1.partners.subscriptions.html b/docs/dyn/paymentsresellersubscription_v1.partners.subscriptions.html index 6607b07f380..aa1d497b482 100644 --- a/docs/dyn/paymentsresellersubscription_v1.partners.subscriptions.html +++ b/docs/dyn/paymentsresellersubscription_v1.partners.subscriptions.html @@ -76,7 +76,7 @@

Payments Reseller Subscriptio

 Instance Methods

 cancel(name, body=None, x__xgafv=None)
-  Used by partners to cancel a subscription service by the end of the current billing cycle for their customers. It should be called directly by the partner using service accounts.
+  Used by partners to cancel a subscription service either immediately or by the end of the current billing cycle for their customers. It should be called directly by the partner using service accounts.
 close()
   Close httplib2 connections.
@@ -101,7 +101,7 @@ Instance Methods

 Method Details

 cancel(name, body=None, x__xgafv=None)
-  Used by partners to cancel a subscription service by the end of the current billing cycle for their customers. It should be called directly by the partner using service accounts.
+  Used by partners to cancel a subscription service either immediately or by the end of the current billing cycle for their customers. It should be called directly by the partner using service accounts.

 Args:
   name: string, Required. The name of the subscription resource to be cancelled. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" (required)
@@ -123,7 +123,7 @@ Method Details
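A hedged sketch of the updated cancel call. The request body shown here is an assumption inferred from the new summary ("either immediately or by the end of the current billing cycle"); the generated CancelSubscriptionRequest schema, not this sketch, is authoritative for field names and enum values.

```python
from googleapiclient import discovery

service = discovery.build("paymentsresellersubscription", "v1")

response = service.partners().subscriptions().cancel(
    name="partners/my-partner/subscriptions/sub-123",  # hypothetical
    body={
        # Assumed field names: False defers the cancellation to the end of
        # the current billing cycle; True would cancel immediately.
        "cancelImmediately": False,
        "cancellationReason": "CANCELLATION_REASON_OTHER",  # assumed enum value
    },
).execute()

sub = response["subscription"]
print(sub["state"], sub["cancellationDetails"]["reason"])
```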
{ "subscription": { # A Subscription resource managed by 3P Partners. # The cancelled subscription resource. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -132,6 +132,7 @@

Method Details

"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -143,7 +144,7 @@

Method Details

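A brief usage note on the new field: processingState complements state, surfacing in-flight transitions (for example, a cancellation that has been requested but is not yet effective, per the "cancelled or cancelling" wording above), while state reports the settled lifecycle value. Both are output only. A sketch, with a hypothetical resource name:

```python
from googleapiclient import discovery

service = discovery.build("paymentsresellersubscription", "v1")

sub = service.partners().subscriptions().get(
    name="partners/my-partner/subscriptions/sub-123").execute()

# state is the settled lifecycle value; processingState (new in this diff)
# may be absent when nothing is in flight, hence .get().
print(sub["state"], sub.get("processingState"))
```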
"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -168,7 +169,7 @@

Method Details

 The object takes the form of:

 { # A Subscription resource managed by 3P Partners.
-  "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`.
+  "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`.
     "reason": "A String", # The reason of the cancellation.
   },
   "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone.
@@ -177,6 +178,7 @@ Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -188,7 +190,7 @@

Method Details

"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -206,7 +208,7 @@

Method Details

 An object of the form:

 { # A Subscription resource managed by 3P Partners.
-  "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`.
+  "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`.
     "reason": "A String", # The reason of the cancellation.
   },
   "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone.
@@ -215,6 +217,7 @@ Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -226,7 +229,7 @@

Method Details

"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -257,7 +260,7 @@

Method Details

{ "subscription": { # A Subscription resource managed by 3P Partners. # The subscription that has user linked to it. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -266,6 +269,7 @@

Method Details

"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -277,7 +281,7 @@

Method Details

"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -336,7 +340,7 @@

Method Details

 An object of the form:

 { # A Subscription resource managed by 3P Partners.
-  "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`.
+  "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`.
     "reason": "A String", # The reason of the cancellation.
   },
   "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone.
@@ -345,6 +349,7 @@ Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -356,7 +361,7 @@

Method Details

"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -375,7 +380,7 @@

Method Details

 The object takes the form of:

 { # A Subscription resource managed by 3P Partners.
-  "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`.
+  "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`.
     "reason": "A String", # The reason of the cancellation.
   },
   "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone.
@@ -384,6 +389,7 @@ Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -395,7 +401,7 @@

Method Details

"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -413,7 +419,7 @@

Method Details

An object of the form: { # A Subscription resource managed by 3P Partners. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -422,6 +428,7 @@


"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -433,7 +440,7 @@


"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -464,7 +471,7 @@


{ # Response that contains the updated subscription resource. "subscription": { # A Subscription resource managed by 3P Partners. # The updated subscription resource. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscriptions in state `STATE_CANCELLED`. "reason": "A String", # The reason for the cancellation. }, "createTime": "A String", # Output only. System-generated timestamp when the subscription was created. UTC timezone. @@ -473,6 +480,7 @@


"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -484,7 +492,7 @@


"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. diff --git a/docs/dyn/redis_v1.projects.locations.instances.html b/docs/dyn/redis_v1.projects.locations.instances.html index 6ccbd73f4d1..186925bd673 100644 --- a/docs/dyn/redis_v1.projects.locations.instances.html +++ b/docs/dyn/redis_v1.projects.locations.instances.html @@ -128,7 +128,7 @@


body: object, The request body. The object takes the form of: -{ # A Google Cloud Redis instance. +{ # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to the STANDARD_HA tier, which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true", AUTH is enabled on the instance. Default value is "false", meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -355,7 +355,7 @@
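As a point of reference, a hedged sketch of passing such a body to the generated client's create method; the project, zones, and instance ID are placeholders, and only a handful of the documented fields are set (tier and memorySizeGb are required by the API, the rest shown here are optional):

    from googleapiclient.discovery import build

    service = build("redis", "v1")  # assumes Application Default Credentials

    parent = "projects/example-project/locations/us-central1"
    body = {
        "tier": "STANDARD_HA",
        "memorySizeGb": 1,
        # For STANDARD_HA, alternativeLocationId must differ from locationId.
        "locationId": "us-central1-a",
        "alternativeLocationId": "us-central1-b",
        "authEnabled": True,  # AUTH defaults to disabled when omitted
    }

    operation = (
        service.projects()
        .locations()
        .instances()
        .create(parent=parent, instanceId="example-instance", body=body)
        .execute()
    )
    print(operation["name"])  # a long-running operation to poll for completion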


Returns: An object of the form: - { # A Google Cloud Redis instance. + { # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to the STANDARD_HA tier, which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true", AUTH is enabled on the instance. Default value is "false", meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -499,7 +499,7 @@


{ # Response for ListInstances. "instances": [ # A list of Redis instances in the project in the specified location, or across all locations. If the `location_id` in the parent field of the request is "-", all regions available to the project are queried, and the results are aggregated. If, in such an aggregated query, a location is unavailable, a placeholder Redis entry is included in the response with the `name` field set to a value of the form `projects/{project_id}/locations/{location_id}/instances/`- and the `status` field set to ERROR and the `status_message` field set to "location not available for ListInstances". - { # A Google Cloud Redis instance. + { # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to the STANDARD_HA tier, which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true", AUTH is enabled on the instance. Default value is "false", meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -589,7 +589,7 @@
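The "-" wildcard described above lets one list call span every region. A sketch of paging through such an aggregated listing with the generated client (the project ID is a placeholder; note the prose above calls the placeholder-entry fields `status`/`status_message`, while in the JSON resource they surface as state/statusMessage):

    from googleapiclient.discovery import build

    service = build("redis", "v1")

    # "-" as the location_id aggregates results across all available regions.
    parent = "projects/example-project/locations/-"
    request = service.projects().locations().instances().list(parent=parent)
    while request is not None:
        response = request.execute()
        for instance in response.get("instances", []):
            # Unavailable locations surface as placeholder entries, so check
            # these fields before treating an entry as a real instance.
            print(instance["name"], instance.get("state"), instance.get("statusMessage"))
        request = service.projects().locations().instances().list_next(request, response)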


body: object, The request body. The object takes the form of: -{ # A Google Cloud Redis instance. +{ # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to the STANDARD_HA tier, which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true", AUTH is enabled on the instance. Default value is "false", meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. diff --git a/docs/dyn/redis_v1beta1.projects.locations.instances.html index bae475a4beb..4b405ed5e82 100644 --- a/docs/dyn/redis_v1beta1.projects.locations.instances.html +++ b/docs/dyn/redis_v1beta1.projects.locations.instances.html @@ -128,7 +128,7 @@


body: object, The request body. The object takes the form of: -{ # A Google Cloud Redis instance. +{ # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to the STANDARD_HA tier, which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true", AUTH is enabled on the instance. Default value is "false", meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -355,7 +355,7 @@


Returns: An object of the form: - { # A Google Cloud Redis instance. + { # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to the STANDARD_HA tier, which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true", AUTH is enabled on the instance. Default value is "false", meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -499,7 +499,7 @@


{ # Response for ListInstances. "instances": [ # A list of Redis instances in the project in the specified location, or across all locations. If the `location_id` in the parent field of the request is "-", all regions available to the project are queried, and the results are aggregated. If, in such an aggregated query, a location is unavailable, a placeholder Redis entry is included in the response with the `name` field set to a value of the form `projects/{project_id}/locations/{location_id}/instances/`- and the `status` field set to ERROR and the `status_message` field set to "location not available for ListInstances". - { # A Google Cloud Redis instance. + { # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to the STANDARD_HA tier, which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true", AUTH is enabled on the instance. Default value is "false", meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -589,7 +589,7 @@


body: object, The request body. The object takes the form of: -{ # A Google Cloud Redis instance. +{ # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to the STANDARD_HA tier, which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true", AUTH is enabled on the instance. Default value is "false", meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html index 8a17f889129..c5cf29e6f49 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html @@ -102,7 +102,7 @@


}, "pageSize": 42, # Maximum number of results to return per page. Set this property to the number of prediction results needed. If zero, the service will choose a reasonable default. The maximum allowed value is 100. Values above 100 will be coerced to 100. "pageToken": "A String", # The previous PredictResponse.next_page_token. - "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request level control and adjust prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request level control and adjust prediction results based on product category. + "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request-level control and adjusts prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request-level control and adjusts prediction results based on product category. "a_key": "", }, "userEvent": { # UserEvent captures all metadata information Retail API needs to know about how end users interact with customers' website. # Required. Context about the user, what they are looking at and what action they took to trigger the predict request. Note that this user event detail won't be ingested to userEvent logs. Thus, a separate userEvent write request is required for event logging. @@ -185,8 +185,8 @@


"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html index 45bb8be3e73..7590a185ce1 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html @@ -235,8 +235,8 @@


"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -449,8 +449,8 @@


"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -544,8 +544,8 @@


"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html index d414bc3a8ec..554c03f0c27 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html @@ -102,7 +102,7 @@


}, "pageSize": 42, # Maximum number of results to return per page. Set this property to the number of prediction results needed. If zero, the service will choose a reasonable default. The maximum allowed value is 100. Values above 100 will be coerced to 100. "pageToken": "A String", # The previous PredictResponse.next_page_token. - "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request level control and adjust prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request level control and adjust prediction results based on product category. + "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request-level control and adjusts prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request-level control and adjusts prediction results based on product category. "a_key": "", }, "userEvent": { # UserEvent captures all metadata information Retail API needs to know about how end users interact with customers' website. # Required. Context about the user, what they are looking at and what action they took to trigger the predict request. Note that this user event detail won't be ingested to userEvent logs. Thus, a separate userEvent write request is required for event logging. @@ -185,8 +185,8 @@


"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html index 50784d94058..902187947ca 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html @@ -235,8 +235,8 @@


"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -449,8 +449,8 @@


"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -544,8 +544,8 @@


"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html index 1b93ab35c89..446f9c17f07 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html @@ -102,7 +102,7 @@


}, "pageSize": 42, # Maximum number of results to return per page. Set this property to the number of prediction results needed. If zero, the service will choose a reasonable default. The maximum allowed value is 100. Values above 100 will be coerced to 100. "pageToken": "A String", # The previous PredictResponse.next_page_token. - "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request level control and adjust prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request level control and adjust prediction results based on product category. + "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request-level control and adjusts prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request-level control and adjusts prediction results based on product category. "a_key": "", }, "userEvent": { # UserEvent captures all metadata information Retail API needs to know about how end users interact with customers' website. # Required. Context about the user, what they are looking at and what action they took to trigger the predict request. Note that this user event detail won't be ingested to userEvent logs. Thus, a separate userEvent write request is required for event logging. @@ -185,8 +185,8 @@


"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html index 6e7ed84a84c..f6dd7d86b97 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html @@ -235,8 +235,8 @@


"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -449,8 +449,8 @@

Method Details

"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -544,8 +544,8 @@

Method Details

"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/run_v1alpha1.namespaces.jobs.html b/docs/dyn/run_v1alpha1.namespaces.jobs.html index a8396af9868..dd8a5197038 100644 --- a/docs/dyn/run_v1alpha1.namespaces.jobs.html +++ b/docs/dyn/run_v1alpha1.namespaces.jobs.html @@ -107,25 +107,25 @@

Method Details

{ # Job represents the configuration of a single job. A job is an immutable resource that references a container image which is run to completion. "apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources +optional "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds +optional - "metadata": { # ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "annotations": { # Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations +optional + "metadata": { # k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional + "annotations": { # (Optional) Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations "a_key": "A String", }, - "clusterName": "A String", # Not currently supported by Cloud Run. The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. +optional - "creationTimestamp": "A String", # CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "deletionGracePeriodSeconds": 42, # Not currently supported by Cloud Run. Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. +optional - "deletionTimestamp": "A String", # DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "finalizers": [ # Not currently supported by Cloud Run. Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +optional +patchStrategy=merge + "clusterName": "A String", # (Optional) Not supported by Cloud Run The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + "creationTimestamp": "A String", # (Optional) CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "deletionGracePeriodSeconds": 42, # (Optional) Not supported by Cloud Run Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. + "deletionTimestamp": "A String", # (Optional) Not supported by Cloud Run DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. 
Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "finalizers": [ # (Optional) Not supported by Cloud Run Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +patchStrategy=merge "A String", ], - "generateName": "A String", # Not currently supported by Cloud Run. GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency +optional string generateName = 2; - "generation": 42, # A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. +optional - "labels": { # Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels +optional + "generateName": "A String", # (Optional) Not supported by Cloud Run GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency string generateName = 2; + "generation": 42, # (Optional) A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. + "labels": { # (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels "a_key": "A String", }, "name": "A String", # Name must be unique within a namespace, within a Cloud Run region. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names +optional "namespace": "A String", # Namespace defines the space within each name must be unique, within a Cloud Run region. In Cloud Run the namespace must be equal to either the project ID or project number. - "ownerReferences": [ # List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. +optional + "ownerReferences": [ # (Optional) Not supported by Cloud Run List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. { # OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field. "apiVersion": "A String", # API version of the referent. "blockOwnerDeletion": True or False, # If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. +optional @@ -135,9 +135,9 @@
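The metadata block above mixes client-settable fields (name, namespace, labels, annotations) with server-populated ones (creationTimestamp, generation, uid, and everything marked as not supported by Cloud Run). A hedged sketch of a create call that sets only the writable fields follows; the project, job name, and image are hypothetical, and the spec/template nesting is assumed from the v1alpha1 Job shape rather than quoted from this file.

    from googleapiclient.discovery import build

    service = build("run", "v1alpha1")

    job = {
        "apiVersion": "run.googleapis.com/v1alpha1",
        "kind": "Job",
        "metadata": {
            # Read-only fields (creationTimestamp, generation, uid, and so on)
            # are populated by the server and therefore omitted here.
            "name": "hello-job",
            "namespace": "my-project",  # must equal the project ID or number
            "labels": {"team": "search"},
            "annotations": {"owner": "alice"},
        },
        "spec": {
            "template": {
                "spec": {
                    "containers": [{"image": "gcr.io/my-project/hello"}],
                },
            },
        },
    }
    created = service.namespaces().jobs().create(
        parent="namespaces/my-project", body=job).execute()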

Method Details

"uid": "A String", # UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, ], - "resourceVersion": "A String", # An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency +optional - "selfLink": "A String", # SelfLink is a URL representing this object. Populated by the system. Read-only. +optional string selfLink = 4; - "uid": "A String", # UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids +optional + "resourceVersion": "A String", # Optional. An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server or omit the value to disable conflict-detection. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients or omitted. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + "selfLink": "A String", # (Optional) SelfLink is a URL representing this object. Populated by the system. Read-only. string selfLink = 4; + "uid": "A String", # (Optional) UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, "spec": { # JobSpec describes how the job execution will look like. # Optional. Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status +optional "activeDeadlineSeconds": "A String", # Optional. Not supported. Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. If set to zero, the system will never attempt to terminate the job based on time. Otherwise, the value must be positive integer. +optional @@ -149,299 +149,199 @@

Method Details

"activeDeadlineSeconds": "A String", # Optional. Optional duration in seconds the instance may be active relative to StartTime before the system will actively try to mark it failed and kill associated containers. If set to zero, the system will never attempt to kill an instance based on time. Otherwise, value must be a positive integer. +optional "containers": [ # Optional. List of containers belonging to the instance. We disallow a number of fields on this Container. Only a single container may be provided. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments may be supplied by the system to the container at runtime. - "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "args": [ # (Optional) Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell "A String", ], - "command": [ # Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "command": [ "A String", ], - "env": [ # List of environment variables to set in the container. Cannot be updated. +optional + "env": [ # (Optional) List of environment variables to set in the container. { # EnvVar represents an environment variable present in a Container. "name": "A String", # Name of the environment variable. Must be a C_IDENTIFIER. - "value": "A String", # Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". +optional - "valueFrom": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported EnvVarSource represents a source for the value of an EnvVar. 
# Cloud Run fully managed: supported Source for the environment variable's value. Only supports secret_key_ref. Cloud Run for Anthos: supported Source for the environment variable's value. Cannot be used if value is not empty. +optional - "configMapKeyRef": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key from a ConfigMap. # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key of a ConfigMap. +optional - "key": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The key to select. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "value": "A String", # (Optional) Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + "valueFrom": { # EnvVarSource represents a source for the value of an EnvVar. # (Optional) Source for the environment variable's value. Only supports secret_key_ref. Source for the environment variable's value. Cannot be used if value is not empty. + "configMapKeyRef": { # Not supported by Cloud Run Selects a key from a ConfigMap. # (Optional) Not supported by Cloud Run Selects a key of a ConfigMap. + "key": "A String", # The key to select. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the ConfigMap or its key must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap or its key must be defined }, - "secretKeyRef": { # Cloud Run fully managed: supported Cloud Run on GKE: supported SecretKeySelector selects a key of a Secret. # Cloud Run fully managed: supported. Selects a key (version) of a secret in Secret Manager. Cloud Run for Anthos: supported. Selects a key of a secret in the pod's namespace. +optional - "key": "A String", # Cloud Run fully managed: supported A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. Cloud Run for Anthos: supported The key of the secret to select from. Must be a valid secret key. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
# This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "secretKeyRef": { # SecretKeySelector selects a key of a Secret. # (Optional) Selects a key (version) of a secret in Secret Manager. + "key": "A String", # A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. The key of the secret to select from. Must be a valid secret key. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported The name of the secret in the pod's namespace to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the Secret or its key must be defined +optional + "name": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. The name of the secret in the pod's namespace to select from. + "optional": True or False, # (Optional) Specify whether the Secret or its key must be defined }, }, }, ], - "envFrom": [ # List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. +optional - { # EnvFromSource represents the source of a set of ConfigMaps - "configMapRef": { # ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # The ConfigMap to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "envFrom": [ # (Optional) List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + { # Not supported by Cloud Run EnvFromSource represents the source of a set of ConfigMaps + "configMapRef": { # Not supported by Cloud Run ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # (Optional) The ConfigMap to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the ConfigMap must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap must be defined }, - "prefix": "A String", # An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. +optional - "secretRef": { # SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # The Secret to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "prefix": "A String", # (Optional) An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + "secretRef": { # Not supported by Cloud Run SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # (Optional) The Secret to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The Secret to select from. 
- "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the Secret must be defined +optional + "name": "A String", # The Secret to select from. + "optional": True or False, # (Optional) Specify whether the Secret must be defined }, }, ], - "image": "A String", # Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - "imagePullPolicy": "A String", # Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images +optional - "lifecycle": { # Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. # Actions that the management system should take in response to container lifecycle events. Cannot be updated. +optional - "postStart": { # Handler defines a specific action that should be taken # PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "image": "A String", # Only supports containers from Google Container Registry or Artifact Registry URL of the Container image. More info: https://kubernetes.io/docs/concepts/containers/images + "imagePullPolicy": "A String", # (Optional) Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + "livenessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], }, - "preStop": { # Handler defines a specific action that should be taken # PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - }, - "livenessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. 
+optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "name": "A String", # Name of the container specified as a DNS_LABEL. Each container must have a unique name (DNS_LABEL). Cannot be updated. - "ports": [ # List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. +optional + "name": "A String", # (Optional) Name of the container specified as a DNS_LABEL. Currently unused in Cloud Run. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names + "ports": [ # (Optional) List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on. { # ContainerPort represents a network port in a single container. - "containerPort": 42, # Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. - "hostIP": "A String", # What host IP to bind the external port to. +optional - "hostPort": 42, # Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. +optional - "name": "A String", # If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. +optional - "protocol": "A String", # Protocol for port. Must be UDP or TCP. Defaults to "TCP". +optional + "containerPort": 42, # (Optional) Port number the container listens on. This must be a valid port number, 0 < x < 65536. + "name": "A String", # (Optional) If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + "protocol": "A String", # (Optional) Protocol for port. Must be "TCP". Defaults to "TCP". }, ], - "readinessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. 
# Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "readinessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. + }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second.
Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "resources": { # ResourceRequirements describes the compute resource requirements. # Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +optional - "limits": { # Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "resources": { # ResourceRequirements describes the compute resource requirements. # (Optional) Compute Resources required by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + "limits": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', and '4'. Setting 4 CPU requires at least 2Gi of memory. Limits describes the maximum amount of compute resources allowed. The values of the map are the string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "limitsInMap": { # Limits describes the maximum amount of compute resources allowed. This is a temporary field created to migrate away from the map limits field. This is done to become compliant with k8s style API. This field is deprecated in favor of limits field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, - "requests": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "requests": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1' and '2'. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map are the string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "requestsInMap": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. This is a temporary field created to migrate away from the map requests field. This is done to become compliant with k8s style API. This field is deprecated in favor of requests field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB".
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, }, - "securityContext": { # SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +optional - "allowPrivilegeEscalation": True or False, # AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN +optional - "capabilities": { # Adds and removes POSIX capabilities from running containers. # The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. +optional - "add": [ # Added capabilities +optional + "securityContext": { # Not supported by Cloud Run SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # (Optional) Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + "runAsUser": 42, # (Optional) The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + }, + "startupProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. "A String", ], - "drop": [ # Removed capabilities +optional - "A String", + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value + }, ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "privileged": True or False, # Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. +optional - "readOnlyRootFilesystem": True or False, # Whether this container has a read-only root filesystem. Default is false. +optional - "runAsGroup": 42, # The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsNonRoot": True or False, # Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsUser": 42, # The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "seLinuxOptions": { # SELinuxOptions are the labels to be applied to the container # The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "level": "A String", # Level is SELinux level label that applies to the container. +optional - "role": "A String", # Role is a SELinux role label that applies to the container. +optional - "type": "A String", # Type is a SELinux type label that applies to the container. +optional - "user": "A String", # User is a SELinux user label that applies to the container. +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. 
+ "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "stdin": True or False, # Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. +optional - "stdinOnce": True or False, # Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false +optional - "terminationMessagePath": "A String", # Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. +optional - "terminationMessagePolicy": "A String", # Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. +optional - "tty": True or False, # Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. +optional - "volumeDevices": [ # volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future. +optional - { # volumeDevice describes a mapping of a raw block device within a container. - "devicePath": "A String", # devicePath is the path inside of the container that the device will be mapped to. - "name": "A String", # name must match the name of a persistentVolumeClaim in the pod - }, - ], - "volumeMounts": [ # Pod volumes to mount into the container's filesystem. Cannot be updated. +optional - { # VolumeMount describes a mounting of a Volume within a container. 
+ "terminationMessagePath": "A String", # (Optional) Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. + "terminationMessagePolicy": "A String", # (Optional) Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + "volumeMounts": [ # (Optional) Volume to mount into the container's filesystem. Only supports SecretVolumeSources. Pod volumes to mount into the container's filesystem. + { # Not supported by Cloud Run VolumeMount describes a mounting of a Volume within a container. "mountPath": "A String", # Path within the container at which the volume should be mounted. Must not contain ':'. - "mountPropagation": "A String", # mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is beta in 1.10. +optional "name": "A String", # This must match the Name of a Volume. - "readOnly": True or False, # Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. +optional - "subPath": "A String", # Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). +optional + "readOnly": True or False, # (Optional) Only true is accepted. Defaults to true. + "subPath": "A String", # (Optional) Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). }, ], - "workingDir": "A String", # Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. +optional + "workingDir": "A String", # (Optional) Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. }, ], "restartPolicy": "A String", # Optional. Restart policy for all containers within the instance. Allowed values are: - OnFailure: Instances will always be restarted on failure if the backoffLimit has not been reached. - Never: Instances are never restarted and all failures are permanent. Cannot be used if backoffLimit is set. +optional "serviceAccountName": "A String", # Optional. Email address of the IAM service account associated with the instance of a Job. The service account represents the identity of the running instance, and determines what permissions the instance has. If not provided, the instance will use the project's default service account. +optional "terminationGracePeriodSeconds": "A String", # Optional. Optional duration in seconds the instance needs to terminate gracefully. Value must be non-negative integer. The value zero indicates delete immediately. 
The grace period is the duration in seconds after the processes running in the instance are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. +optional "volumes": [ # Optional. List of volumes that can be mounted by containers belonging to the instance. More info: https://kubernetes.io/docs/concepts/storage/volumes +optional - { # Volume represents a named volume in a container. - "configMap": { # Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + { # Not supported by Cloud Run Volume represents a named volume in a container. + "configMap": { # Not supported by Cloud Run Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "items": [ # (Optional) If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. 
Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], "name": "A String", # Name of the config. - "optional": True or False, # Specify whether the Secret or its keys must be defined. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. }, "name": "A String", # Volume's name. - "secret": { # The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # Cloud Run fully managed: supported If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. Cloud Run for Anthos: supported If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + "secret": { # The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret_name. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0000 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. NOTE: This is an integer representation of the mode bits. So, the integer value should look exactly as the chmod numeric notation, i.e. Unix chmod "777" (a=rwx) should have the integer value 777. + "items": [ # (Optional) If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], - "optional": True or False, # Specify whether the Secret or its keys must be defined. - "secretName": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported Name of the secret in the container's namespace to use. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. + "secretName": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Name of the secret in the container's namespace to use. }, }, ], @@ -492,25 +392,25 @@
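The Cloud Run constraints called out in the hunk above (a single container port whose name selects "http1" or "h2c", CPU limits restricted to '1', '2', or '4' with 4 CPU requiring at least 2Gi of memory, and volumes backed only by Secret Manager) are easiest to see assembled. The following is a minimal, hypothetical sketch of a container and volume spec written as Python dictionaries for this client library; the image URL, secret name, file names, and mount path are illustrative placeholders, not values taken from this document.

    # Hypothetical container spec reflecting the documented Cloud Run limits.
    container = {
        "image": "gcr.io/my-project/my-job:latest",  # placeholder; Container Registry or Artifact Registry only
        "ports": [
            # Only a single port may be specified; "name" selects the protocol ("http1" or "h2c").
            {"name": "h2c", "containerPort": 8080, "protocol": "TCP"},
        ],
        "resources": {
            # Only memory and CPU are supported; CPU limits must be '1', '2', or '4',
            # and a 4-CPU limit requires at least 2Gi of memory.
            "limits": {"cpu": "2", "memory": "1Gi"},
        },
        "volumeMounts": [
            # Only SecretVolumeSources are supported, and readOnly must be true.
            {"name": "secret-vol", "mountPath": "/etc/secrets", "readOnly": True},
        ],
    }

    # Matching volume: "key" is the Secret Manager version ('latest' or an integer),
    # and "path" is the file name exposed under the mount path.
    volume = {
        "name": "secret-vol",
        "secret": {
            "secretName": "my-secret",  # placeholder; assumed to live in the same project
            "items": [{"key": "latest", "path": "credentials.json"}],
        },
    }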

Method Details

{ # Job represents the configuration of a single job. A job is an immutable resource that references a container image which is run to completion. "apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources +optional "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds +optional - "metadata": { # ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "annotations": { # Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations +optional + "metadata": { # k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional + "annotations": { # (Optional) Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations "a_key": "A String", }, - "clusterName": "A String", # Not currently supported by Cloud Run. The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. +optional - "creationTimestamp": "A String", # CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "deletionGracePeriodSeconds": 42, # Not currently supported by Cloud Run. Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. +optional - "deletionTimestamp": "A String", # DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "finalizers": [ # Not currently supported by Cloud Run. Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +optional +patchStrategy=merge + "clusterName": "A String", # (Optional) Not supported by Cloud Run The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + "creationTimestamp": "A String", # (Optional) CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "deletionGracePeriodSeconds": 42, # (Optional) Not supported by Cloud Run Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. + "deletionTimestamp": "A String", # (Optional) Not supported by Cloud Run DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. 
Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "finalizers": [ # (Optional) Not supported by Cloud Run Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +patchStrategy=merge "A String", ], - "generateName": "A String", # Not currently supported by Cloud Run. GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency +optional string generateName = 2; - "generation": 42, # A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. +optional - "labels": { # Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels +optional + "generateName": "A String", # (Optional) Not supported by Cloud Run GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency string generateName = 2; + "generation": 42, # (Optional) A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. + "labels": { # (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels "a_key": "A String", }, "name": "A String", # Name must be unique within a namespace, within a Cloud Run region. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names +optional "namespace": "A String", # Namespace defines the space within which each name must be unique, within a Cloud Run region. In Cloud Run the namespace must be equal to either the project ID or project number. - "ownerReferences": [ # List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. +optional + "ownerReferences": [ # (Optional) Not supported by Cloud Run List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. { # OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field. "apiVersion": "A String", # API version of the referent. "blockOwnerDeletion": True or False, # If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. +optional @@ -520,9 +420,9 @@
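As a concrete reading of the ObjectMeta fields above, here is a hedged sketch of the metadata portion of a Job body in Python. Only fields this document describes as client-settable are populated; the job name, project ID, label, and annotation key are hypothetical placeholders.

    # Hypothetical Job metadata; the namespace must equal the project ID or project number.
    metadata = {
        "name": "nightly-export",   # must be unique within the namespace, within a Cloud Run region
        "namespace": "my-project",  # placeholder project ID
        "labels": {"env": "prod"},  # placeholder organizational label
        "annotations": {
            "example.com/owner": "data-team",  # placeholder annotation key/value
        },
    }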

Method Details

"uid": "A String", # UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, ], - "resourceVersion": "A String", # An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency +optional - "selfLink": "A String", # SelfLink is a URL representing this object. Populated by the system. Read-only. +optional string selfLink = 4; - "uid": "A String", # UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids +optional + "resourceVersion": "A String", # Optional. An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server or omit the value to disable conflict-detection. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients or omitted. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + "selfLink": "A String", # (Optional) SelfLink is a URL representing this object. Populated by the system. Read-only. string selfLink = 4; + "uid": "A String", # (Optional) UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, "spec": { # JobSpec describes how the job execution will look like. # Optional. Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status +optional "activeDeadlineSeconds": "A String", # Optional. Not supported. Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. If set to zero, the system will never attempt to terminate the job based on time. Otherwise, the value must be positive integer. +optional @@ -534,299 +434,199 @@

Method Details

"activeDeadlineSeconds": "A String", # Optional. Optional duration in seconds the instance may be active relative to StartTime before the system will actively try to mark it failed and kill associated containers. If set to zero, the system will never attempt to kill an instance based on time. Otherwise, value must be a positive integer. +optional "containers": [ # Optional. List of containers belonging to the instance. We disallow a number of fields on this Container. Only a single container may be provided. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments may be supplied by the system to the container at runtime. - "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "args": [ # (Optional) Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell "A String", ], - "command": [ # Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "command": [ "A String", ], - "env": [ # List of environment variables to set in the container. Cannot be updated. +optional + "env": [ # (Optional) List of environment variables to set in the container. { # EnvVar represents an environment variable present in a Container. "name": "A String", # Name of the environment variable. Must be a C_IDENTIFIER. - "value": "A String", # Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". +optional - "valueFrom": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported EnvVarSource represents a source for the value of an EnvVar. 
# Cloud Run fully managed: supported Source for the environment variable's value. Only supports secret_key_ref. Cloud Run for Anthos: supported Source for the environment variable's value. Cannot be used if value is not empty. +optional - "configMapKeyRef": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key from a ConfigMap. # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key of a ConfigMap. +optional - "key": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The key to select. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "value": "A String", # (Optional) Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + "valueFrom": { # EnvVarSource represents a source for the value of an EnvVar. # (Optional) Source for the environment variable's value. Only supports secret_key_ref. Source for the environment variable's value. Cannot be used if value is not empty. + "configMapKeyRef": { # Not supported by Cloud Run Selects a key from a ConfigMap. # (Optional) Not supported by Cloud Run Selects a key of a ConfigMap. + "key": "A String", # The key to select. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
# This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "secretKeyRef": { # SecretKeySelector selects a key of a Secret. # (Optional) Selects a key (version) of a secret in Secret Manager. + "key": "A String", # A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. The key of the secret to select from. Must be a valid secret key. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported The name of the secret in the pod's namespace to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the Secret or its key must be defined +optional + "name": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. The name of the secret in the pod's namespace to select from. + "optional": True or False, # (Optional) Specify whether the Secret or its key must be defined }, }, }, ], - "envFrom": [ # List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. +optional - { # EnvFromSource represents the source of a set of ConfigMaps - "configMapRef": { # ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # The ConfigMap to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "envFrom": [ # (Optional) List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + { # Not supported by Cloud Run EnvFromSource represents the source of a set of ConfigMaps + "configMapRef": { # Not supported by Cloud Run ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # (Optional) The ConfigMap to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the ConfigMap must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap must be defined }, - "prefix": "A String", # An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. +optional - "secretRef": { # SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # The Secret to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "prefix": "A String", # (Optional) An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + "secretRef": { # Not supported by Cloud Run SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # (Optional) The Secret to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The Secret to select from. 
- "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the Secret must be defined +optional + "name": "A String", # The Secret to select from. + "optional": True or False, # (Optional) Specify whether the Secret must be defined }, }, ], - "image": "A String", # Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - "imagePullPolicy": "A String", # Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images +optional - "lifecycle": { # Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. # Actions that the management system should take in response to container lifecycle events. Cannot be updated. +optional - "postStart": { # Handler defines a specific action that should be taken # PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "image": "A String", # Only supports containers from Google Container Registry or Artifact Registry URL of the Container image. More info: https://kubernetes.io/docs/concepts/containers/images + "imagePullPolicy": "A String", # (Optional) Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + "livenessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], }, - "preStop": { # Handler defines a specific action that should be taken # PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - }, - "livenessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. 
+optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "name": "A String", # Name of the container specified as a DNS_LABEL. Each container must have a unique name (DNS_LABEL). Cannot be updated. - "ports": [ # List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. +optional + "name": "A String", # (Optional) Name of the container specified as a DNS_LABEL. Currently unused in Cloud Run. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names + "ports": [ # (Optional) List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on. { # ContainerPort represents a network port in a single container. - "containerPort": 42, # Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. - "hostIP": "A String", # What host IP to bind the external port to. +optional - "hostPort": 42, # Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. +optional - "name": "A String", # If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. +optional - "protocol": "A String", # Protocol for port. Must be UDP or TCP. Defaults to "TCP". +optional + "containerPort": 42, # (Optional) Port number the container listens on. This must be a valid port number, 0 < x < 65536. + "name": "A String", # (Optional) If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + "protocol": "A String", # (Optional) Protocol for port. Must be "TCP". Defaults to "TCP". }, ], - "readinessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. 
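Since only a single ports entry is allowed and the name field selects the protocol, a sketch of the list (port number illustrative) might be:

    # The one allowed "ports" entry for a Cloud Run container.
    container_ports = [
        {
            "name": "h2c",          # "http1" or "h2c", per the field notes above
            "containerPort": 8080,  # must listen on 0.0.0.0 inside the container
            "protocol": "TCP",      # only "TCP" is accepted
        },
    ]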
# Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "readinessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. + }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second.
Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "resources": { # ResourceRequirements describes the compute resource requirements. # Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +optional - "limits": { # Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "resources": { # ResourceRequirements describes the compute resource requirements. # (Optional) Compute Resources required by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + "limits": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', and '4'. Setting 4 CPU requires at least 2Gi of memory. Limits describes the maximum amount of compute resources allowed. The values of the map are the string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "limitsInMap": { # Limits describes the maximum amount of compute resources allowed. This is a temporary field created to migrate away from the map limits field. This is done to become compliant with k8s style API. This field is deprecated in favor of limits field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, - "requests": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "requests": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1' and '2'. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map are the string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "requestsInMap": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. This is a temporary field created to migrate away from the map requests field. This is done to become compliant with k8s style API. This field is deprecated in favor of requests field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB".
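Per the limits and requests notes above, a sketch of a resources block (values illustrative; requests defaults to limits when omitted):

    # Compute resources for the container; only "cpu" and "memory" keys are supported.
    resources = {
        "limits": {
            "cpu": "2",       # only '1', '2', or '4'; 4 CPU needs at least 2Gi of memory
            "memory": "1Gi",  # k8s 'quantity' string form
        },
    }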
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, }, - "securityContext": { # SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +optional - "allowPrivilegeEscalation": True or False, # AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN +optional - "capabilities": { # Adds and removes POSIX capabilities from running containers. # The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. +optional - "add": [ # Added capabilities +optional + "securityContext": { # Not supported by Cloud Run SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # (Optional) Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + "runAsUser": 42, # (Optional) The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + }, + "startupProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. "A String", ], - "drop": [ # Removed capabilities +optional - "A String", + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value + }, ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "privileged": True or False, # Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. +optional - "readOnlyRootFilesystem": True or False, # Whether this container has a read-only root filesystem. Default is false. +optional - "runAsGroup": 42, # The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsNonRoot": True or False, # Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsUser": 42, # The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "seLinuxOptions": { # SELinuxOptions are the labels to be applied to the container # The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "level": "A String", # Level is SELinux level label that applies to the container. +optional - "role": "A String", # Role is a SELinux role label that applies to the container. +optional - "type": "A String", # Type is a SELinux type label that applies to the container. +optional - "user": "A String", # User is a SELinux user label that applies to the container. +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. 
+ "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "stdin": True or False, # Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. +optional - "stdinOnce": True or False, # Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false +optional - "terminationMessagePath": "A String", # Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. +optional - "terminationMessagePolicy": "A String", # Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. +optional - "tty": True or False, # Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. +optional - "volumeDevices": [ # volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future. +optional - { # volumeDevice describes a mapping of a raw block device within a container. - "devicePath": "A String", # devicePath is the path inside of the container that the device will be mapped to. - "name": "A String", # name must match the name of a persistentVolumeClaim in the pod - }, - ], - "volumeMounts": [ # Pod volumes to mount into the container's filesystem. Cannot be updated. +optional - { # VolumeMount describes a mounting of a Volume within a container. 
+ "terminationMessagePath": "A String", # (Optional) Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. + "terminationMessagePolicy": "A String", # (Optional) Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + "volumeMounts": [ # (Optional) Volume to mount into the container's filesystem. Only supports SecretVolumeSources. Pod volumes to mount into the container's filesystem. + { # Not supported by Cloud Run VolumeMount describes a mounting of a Volume within a container. "mountPath": "A String", # Path within the container at which the volume should be mounted. Must not contain ':'. - "mountPropagation": "A String", # mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is beta in 1.10. +optional "name": "A String", # This must match the Name of a Volume. - "readOnly": True or False, # Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. +optional - "subPath": "A String", # Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). +optional + "readOnly": True or False, # (Optional) Only true is accepted. Defaults to true. + "subPath": "A String", # (Optional) Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). }, ], - "workingDir": "A String", # Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. +optional + "workingDir": "A String", # (Optional) Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. }, ], "restartPolicy": "A String", # Optional. Restart policy for all containers within the instance. Allowed values are: - OnFailure: Instances will always be restarted on failure if the backoffLimit has not been reached. - Never: Instances are never restarted and all failures are permanent. Cannot be used if backoffLimit is set. +optional "serviceAccountName": "A String", # Optional. Email address of the IAM service account associated with the instance of a Job. The service account represents the identity of the running instance, and determines what permissions the instance has. If not provided, the instance will use the project's default service account. +optional "terminationGracePeriodSeconds": "A String", # Optional. Optional duration in seconds the instance needs to terminate gracefully. Value must be non-negative integer. The value zero indicates delete immediately. 
The grace period is the duration in seconds after the processes running in the instance are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. +optional "volumes": [ # Optional. List of volumes that can be mounted by containers belonging to the instance. More info: https://kubernetes.io/docs/concepts/storage/volumes +optional - { # Volume represents a named volume in a container. - "configMap": { # Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + { # Not supported by Cloud Run Volume represents a named volume in a container. + "configMap": { # Not supported by Cloud Run Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "items": [ # (Optional) If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. 
Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], "name": "A String", # Name of the config. - "optional": True or False, # Specify whether the Secret or its keys must be defined. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. }, "name": "A String", # Volume's name. - "secret": { # The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # Cloud Run fully managed: supported If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. Cloud Run for Anthos: supported If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + "secret": { # The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret_name. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0000 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. NOTE: This is an integer representation of the mode bits. So, the integer value should look exactly as the chmod numeric notation, i.e. Unix chmod "777" (a=rwx) should have the integer value 777. + "items": [ # (Optional) If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], - "optional": True or False, # Specify whether the Secret or its keys must be defined. - "secretName": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported Name of the secret in the container's namespace to use. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. + "secretName": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Name of the secret in the container's namespace to use. }, }, ], @@ -905,25 +705,25 @@
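Assembling the volume fields above: a sketch (names illustrative) of a Secret Manager-backed volume plus the volumeMount that exposes it, using only fields documented in this spec:

    # Expose version "latest" of secret "my-config" as /etc/secrets/config.txt.
    volume = {
        "name": "config-volume",
        "secret": {
            "secretName": "my-config",  # hypothetical secret in the same project
            "items": [
                {
                    "key": "latest",       # secret version to fetch
                    "path": "config.txt",  # relative file name inside the mount
                    "mode": 444,           # decimal digits mirror chmod notation, per the note above
                },
            ],
        },
    }
    volume_mount = {
        "name": "config-volume",      # must match the volume's name
        "mountPath": "/etc/secrets",  # must not contain ':'
        "readOnly": True,             # only true is accepted
    }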

Method Details

{ # Job represents the configuration of a single job. A job is an immutable resource that references a container image which is run to completion. "apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources +optional "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds +optional - "metadata": { # ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "annotations": { # Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations +optional + "metadata": { # k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional + "annotations": { # (Optional) Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations "a_key": "A String", }, - "clusterName": "A String", # Not currently supported by Cloud Run. The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. +optional - "creationTimestamp": "A String", # CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "deletionGracePeriodSeconds": 42, # Not currently supported by Cloud Run. Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. +optional - "deletionTimestamp": "A String", # DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "finalizers": [ # Not currently supported by Cloud Run. Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +optional +patchStrategy=merge + "clusterName": "A String", # (Optional) Not supported by Cloud Run The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + "creationTimestamp": "A String", # (Optional) CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "deletionGracePeriodSeconds": 42, # (Optional) Not supported by Cloud Run Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. + "deletionTimestamp": "A String", # (Optional) Not supported by Cloud Run DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. 
Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "finalizers": [ # (Optional) Not supported by Cloud Run Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +patchStrategy=merge "A String", ], - "generateName": "A String", # Not currently supported by Cloud Run. GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency +optional string generateName = 2; + "generateName": "A String", # (Optional) Not supported by Cloud Run GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + "generation": 42, # (Optional) A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. + "labels": { # (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels "a_key": "A String", }, "name": "A String", # Name must be unique within a namespace, within a Cloud Run region. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names +optional "namespace": "A String", # Namespace defines the space within which each name must be unique, within a Cloud Run region. In Cloud Run the namespace must be equal to either the project ID or project number. - "ownerReferences": [ # List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. +optional + "ownerReferences": [ # (Optional) Not supported by Cloud Run List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. { # OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field. "apiVersion": "A String", # API version of the referent. "blockOwnerDeletion": True or False, # If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. +optional @@ -933,9 +733,9 @@

Method Details

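To make the ObjectMeta schema in this section concrete, the sketch below builds the metadata block of a Job body as a plain Python dict, the way the generated client expects request bodies. It is a minimal sketch, not canonical usage: the job name, project ID, and label/annotation values are hypothetical, and only client-settable fields are included, since output-only fields such as creationTimestamp, generation, resourceVersion, selfLink, and uid are populated by the system.

    # Minimal sketch of a Job body's metadata block (illustrative values only).
    metadata = {
        "name": "my-job",              # hypothetical job name; unique per namespace
        "namespace": "my-project-id",  # must equal the project ID or project number
        "labels": {"env": "dev"},      # optional: used to organize and select objects
        "annotations": {               # optional: arbitrary, non-queryable metadata
            "owner": "data-pipeline-team",
        },
    }
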
"uid": "A String", # UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, ], - "resourceVersion": "A String", # An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency +optional - "selfLink": "A String", # SelfLink is a URL representing this object. Populated by the system. Read-only. +optional string selfLink = 4; - "uid": "A String", # UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids +optional + "resourceVersion": "A String", # Optional. An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server or omit the value to disable conflict-detection. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients or omitted. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + "selfLink": "A String", # (Optional) SelfLink is a URL representing this object. Populated by the system. Read-only. string selfLink = 4; + "uid": "A String", # (Optional) UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, "spec": { # JobSpec describes how the job execution will look like. # Optional. Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status +optional "activeDeadlineSeconds": "A String", # Optional. Not supported. Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. If set to zero, the system will never attempt to terminate the job based on time. Otherwise, the value must be positive integer. +optional @@ -947,299 +747,199 @@

Method Details

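The container-level fields documented below translate into nested dicts in the same way. The following is a hedged sketch of a single entry for the containers list, assuming the constraints stated in this schema (a single container, a Container Registry or Artifact Registry image, secret_key_ref as the only supported valueFrom source, and only cpu/memory resource keys); the image path, secret name, and values are placeholders, not a definitive configuration.

    # One container entry for the instance spec (illustrative values only).
    container = {
        "image": "gcr.io/my-project-id/worker",  # Container/Artifact Registry image URL
        "args": ["--shard=7"],                   # appended to the image's ENTRYPOINT
        "env": [
            {"name": "MODE", "value": "batch"},  # literal value
            {
                # Value read from Secret Manager: version 'latest' of secret 'api-key'.
                "name": "API_KEY",
                "valueFrom": {
                    "secretKeyRef": {"name": "api-key", "key": "latest"},
                },
            },
        ],
        "resources": {
            # Only 'cpu' and 'memory' are supported; the CPU limit must be '1', '2', or '4'.
            "limits": {"cpu": "1", "memory": "512Mi"},
        },
    }
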
"activeDeadlineSeconds": "A String", # Optional. Optional duration in seconds the instance may be active relative to StartTime before the system will actively try to mark it failed and kill associated containers. If set to zero, the system will never attempt to kill an instance based on time. Otherwise, value must be a positive integer. +optional "containers": [ # Optional. List of containers belonging to the instance. We disallow a number of fields on this Container. Only a single container may be provided. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments may be supplied by the system to the container at runtime. - "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "args": [ # (Optional) Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell "A String", ], - "command": [ # Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "command": [ "A String", ], - "env": [ # List of environment variables to set in the container. Cannot be updated. +optional + "env": [ # (Optional) List of environment variables to set in the container. { # EnvVar represents an environment variable present in a Container. "name": "A String", # Name of the environment variable. Must be a C_IDENTIFIER. - "value": "A String", # Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". +optional - "valueFrom": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported EnvVarSource represents a source for the value of an EnvVar. 
# Cloud Run fully managed: supported Source for the environment variable's value. Only supports secret_key_ref. Cloud Run for Anthos: supported Source for the environment variable's value. Cannot be used if value is not empty. +optional - "configMapKeyRef": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key from a ConfigMap. # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key of a ConfigMap. +optional - "key": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The key to select. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "value": "A String", # (Optional) Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + "valueFrom": { # EnvVarSource represents a source for the value of an EnvVar. # (Optional) Source for the environment variable's value. Only supports secret_key_ref. Source for the environment variable's value. Cannot be used if value is not empty. + "configMapKeyRef": { # Not supported by Cloud Run Selects a key from a ConfigMap. # (Optional) Not supported by Cloud Run Selects a key of a ConfigMap. + "key": "A String", # The key to select. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the ConfigMap or its key must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap or its key must be defined }, - "secretKeyRef": { # Cloud Run fully managed: supported Cloud Run on GKE: supported SecretKeySelector selects a key of a Secret. # Cloud Run fully managed: supported. Selects a key (version) of a secret in Secret Manager. Cloud Run for Anthos: supported. Selects a key of a secret in the pod's namespace. +optional - "key": "A String", # Cloud Run fully managed: supported A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. Cloud Run for Anthos: supported The key of the secret to select from. Must be a valid secret key. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
# This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "secretKeyRef": { # SecretKeySelector selects a key of a Secret. # (Optional) Selects a key (version) of a secret in Secret Manager. + "key": "A String", # A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. The key of the secret to select from. Must be a valid secret key. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported The name of the secret in the pod's namespace to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the Secret or its key must be defined +optional + "name": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. The name of the secret in the pod's namespace to select from. + "optional": True or False, # (Optional) Specify whether the Secret or its key must be defined }, }, }, ], - "envFrom": [ # List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. +optional - { # EnvFromSource represents the source of a set of ConfigMaps - "configMapRef": { # ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # The ConfigMap to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "envFrom": [ # (Optional) List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + { # Not supported by Cloud Run EnvFromSource represents the source of a set of ConfigMaps + "configMapRef": { # Not supported by Cloud Run ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # (Optional) The ConfigMap to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the ConfigMap must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap must be defined }, - "prefix": "A String", # An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. +optional - "secretRef": { # SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # The Secret to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "prefix": "A String", # (Optional) An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + "secretRef": { # Not supported by Cloud Run SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # (Optional) The Secret to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The Secret to select from. 
- "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the Secret must be defined +optional + "name": "A String", # The Secret to select from. + "optional": True or False, # (Optional) Specify whether the Secret must be defined }, }, ], - "image": "A String", # Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - "imagePullPolicy": "A String", # Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images +optional - "lifecycle": { # Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. # Actions that the management system should take in response to container lifecycle events. Cannot be updated. +optional - "postStart": { # Handler defines a specific action that should be taken # PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "image": "A String", # Only supports containers from Google Container Registry or Artifact Registry URL of the Container image. More info: https://kubernetes.io/docs/concepts/containers/images + "imagePullPolicy": "A String", # (Optional) Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + "livenessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], }, - "preStop": { # Handler defines a specific action that should be taken # PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - }, - "livenessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. 
+optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "name": "A String", # Name of the container specified as a DNS_LABEL. Each container must have a unique name (DNS_LABEL). Cannot be updated. - "ports": [ # List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. +optional + "name": "A String", # (Optional) Name of the container specified as a DNS_LABEL. Currently unused in Cloud Run. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names + "ports": [ # (Optional) List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on. { # ContainerPort represents a network port in a single container. - "containerPort": 42, # Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. - "hostIP": "A String", # What host IP to bind the external port to. +optional - "hostPort": 42, # Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. +optional - "name": "A String", # If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. +optional - "protocol": "A String", # Protocol for port. Must be UDP or TCP. Defaults to "TCP". +optional + "containerPort": 42, # (Optional) Port number the container listens on. This must be a valid port number, 0 < x < 65536. + "name": "A String", # (Optional) If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + "protocol": "A String", # (Optional) Protocol for port. Must be "TCP". Defaults to "TCP". }, ], - "readinessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. 
# Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "readinessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. + }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "resources": { # ResourceRequirements describes the compute resource requirements. # Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +optional - "limits": { # Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "resources": { # ResourceRequirements describes the compute resource requirements. # (Optional) Compute Resources required by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + "limits": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', and '4'. Setting 4 CPU requires at least 2Gi of memory. Limits describes the maximum amount of compute resources allowed. The values of the map are the string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "limitsInMap": { # Limits describes the maximum amount of compute resources allowed. This is a temporary field created to migrate away from the map limits field. This is done to become compliant with k8s style API. This field is deprecated in favor of limits field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, - "requests": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "requests": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1' and '2'. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map are the string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "requestsInMap": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. This is a temporary field created to migrate away from the map requests field. This is done to become compliant with k8s style API. This field is deprecated in favor of requests field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". 
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, }, - "securityContext": { # SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +optional - "allowPrivilegeEscalation": True or False, # AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN +optional - "capabilities": { # Adds and removes POSIX capabilities from running containers. # The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. +optional - "add": [ # Added capabilities +optional + "securityContext": { # Not supported by Cloud Run SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # (Optional) Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + "runAsUser": 42, # (Optional) The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + }, + "startupProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. "A String", ], - "drop": [ # Removed capabilities +optional - "A String", + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value + }, ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "privileged": True or False, # Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. +optional - "readOnlyRootFilesystem": True or False, # Whether this container has a read-only root filesystem. Default is false. +optional - "runAsGroup": 42, # The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsNonRoot": True or False, # Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsUser": 42, # The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "seLinuxOptions": { # SELinuxOptions are the labels to be applied to the container # The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "level": "A String", # Level is SELinux level label that applies to the container. +optional - "role": "A String", # Role is a SELinux role label that applies to the container. +optional - "type": "A String", # Type is a SELinux type label that applies to the container. +optional - "user": "A String", # User is a SELinux user label that applies to the container. +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. 
+ "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "stdin": True or False, # Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. +optional - "stdinOnce": True or False, # Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false +optional - "terminationMessagePath": "A String", # Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. +optional - "terminationMessagePolicy": "A String", # Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. +optional - "tty": True or False, # Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. +optional - "volumeDevices": [ # volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future. +optional - { # volumeDevice describes a mapping of a raw block device within a container. - "devicePath": "A String", # devicePath is the path inside of the container that the device will be mapped to. - "name": "A String", # name must match the name of a persistentVolumeClaim in the pod - }, - ], - "volumeMounts": [ # Pod volumes to mount into the container's filesystem. Cannot be updated. +optional - { # VolumeMount describes a mounting of a Volume within a container. 
+ "terminationMessagePath": "A String", # (Optional) Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. + "terminationMessagePolicy": "A String", # (Optional) Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + "volumeMounts": [ # (Optional) Volume to mount into the container's filesystem. Only supports SecretVolumeSources. Pod volumes to mount into the container's filesystem. + { # Not supported by Cloud Run VolumeMount describes a mounting of a Volume within a container. "mountPath": "A String", # Path within the container at which the volume should be mounted. Must not contain ':'. - "mountPropagation": "A String", # mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is beta in 1.10. +optional "name": "A String", # This must match the Name of a Volume. - "readOnly": True or False, # Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. +optional - "subPath": "A String", # Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). +optional + "readOnly": True or False, # (Optional) Only true is accepted. Defaults to true. + "subPath": "A String", # (Optional) Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). }, ], - "workingDir": "A String", # Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. +optional + "workingDir": "A String", # (Optional) Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. }, ], "restartPolicy": "A String", # Optional. Restart policy for all containers within the instance. Allowed values are: - OnFailure: Instances will always be restarted on failure if the backoffLimit has not been reached. - Never: Instances are never restarted and all failures are permanent. Cannot be used if backoffLimit is set. +optional "serviceAccountName": "A String", # Optional. Email address of the IAM service account associated with the instance of a Job. The service account represents the identity of the running instance, and determines what permissions the instance has. If not provided, the instance will use the project's default service account. +optional "terminationGracePeriodSeconds": "A String", # Optional. Optional duration in seconds the instance needs to terminate gracefully. Value must be non-negative integer. The value zero indicates delete immediately. 
The grace period is the duration in seconds after the processes running in the instance are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. +optional "volumes": [ # Optional. List of volumes that can be mounted by containers belonging to the instance. More info: https://kubernetes.io/docs/concepts/storage/volumes +optional - { # Volume represents a named volume in a container. - "configMap": { # Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + { # Not supported by Cloud Run Volume represents a named volume in a container. + "configMap": { # Not supported by Cloud Run Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "items": [ # (Optional) If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified that is not present in the ConfigMap, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. 
Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], "name": "A String", # Name of the config. - "optional": True or False, # Specify whether the Secret or its keys must be defined. + "optional": True or False, # (Optional) Specify whether the ConfigMap or its keys must be defined. }, "name": "A String", # Volume's name. - "secret": { # The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # Cloud Run fully managed: supported If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. Cloud Run for Anthos: supported If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + "secret": { # The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret_name. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0000 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. NOTE: This is an integer representation of the mode bits. So, the integer value should look exactly as the chmod numeric notation, i.e. Unix chmod "777" (a=rwx) should have the integer value 777. + "items": [ # (Optional) If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], - "optional": True or False, # Specify whether the Secret or its keys must be defined. - "secretName": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported Name of the secret in the container's namespace to use. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. + "secretName": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Name of the secret in the container's namespace to use. }, }, ], @@ -1307,25 +1007,25 @@
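Read together, the secret volume and the container's volumeMounts fields above wire a Secret Manager secret into a job's filesystem. As a minimal sketch (the volume name, secret name, version, and mount path below are hypothetical illustrations, not values from this reference), the corresponding fragments of a Job body might look like:

    volume = {
        "name": "app-config",  # hypothetical volume name; must match the volumeMount below
        "secret": {
            "secretName": "app-config",  # hypothetical Secret Manager secret in the same project
            "items": [
                {
                    "key": "latest",  # secret version: 'latest' or a specific version number
                    "path": "config.json",  # file name exposed under the mount path
                },
            ],
        },
    }

    volume_mount = {
        "name": "app-config",  # must match the volume's name above
        "mountPath": "/etc/app",  # must not contain ':'
        "readOnly": True,  # only true is accepted for secret mounts
    }

With both fragments in place, the container would see the selected secret version at /etc/app/config.json.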

Method Details

{ # Job represents the configuration of a single job. A job is an immutable resource that references a container image which is run to completion. "apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources +optional "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds +optional - "metadata": { # ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "annotations": { # Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations +optional + "metadata": { # k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional + "annotations": { # (Optional) Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations "a_key": "A String", }, - "clusterName": "A String", # Not currently supported by Cloud Run. The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. +optional - "creationTimestamp": "A String", # CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "deletionGracePeriodSeconds": 42, # Not currently supported by Cloud Run. Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. +optional - "deletionTimestamp": "A String", # DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "finalizers": [ # Not currently supported by Cloud Run. Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +optional +patchStrategy=merge + "clusterName": "A String", # (Optional) Not supported by Cloud Run The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + "creationTimestamp": "A String", # (Optional) CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "deletionGracePeriodSeconds": 42, # (Optional) Not supported by Cloud Run Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. + "deletionTimestamp": "A String", # (Optional) Not supported by Cloud Run DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. 
Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "finalizers": [ # (Optional) Not supported by Cloud Run Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +patchStrategy=merge "A String", ], - "generateName": "A String", # Not currently supported by Cloud Run. GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency +optional string generateName = 2; - "generation": 42, # A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. +optional - "labels": { # Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels +optional + "generateName": "A String", # (Optional) Not supported by Cloud Run GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency string generateName = 2; + "generation": 42, # (Optional) A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. + "labels": { # (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels "a_key": "A String", }, "name": "A String", # Name must be unique within a namespace, within a Cloud Run region. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names +optional "namespace": "A String", # Namespace defines the space within which each name must be unique, within a Cloud Run region. In Cloud Run the namespace must be equal to either the project ID or project number. - "ownerReferences": [ # List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. +optional + "ownerReferences": [ # (Optional) Not supported by Cloud Run List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. { # OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field. "apiVersion": "A String", # API version of the referent. "blockOwnerDeletion": True or False, # If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. +optional @@ -1335,9 +1035,9 @@

Method Details

"uid": "A String", # UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, ], - "resourceVersion": "A String", # An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency +optional - "selfLink": "A String", # SelfLink is a URL representing this object. Populated by the system. Read-only. +optional string selfLink = 4; - "uid": "A String", # UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids +optional + "resourceVersion": "A String", # Optional. An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server or omit the value to disable conflict-detection. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients or omitted. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + "selfLink": "A String", # (Optional) SelfLink is a URL representing this object. Populated by the system. Read-only. string selfLink = 4; + "uid": "A String", # (Optional) UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, "spec": { # JobSpec describes how the job execution will look like. # Optional. Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status +optional "activeDeadlineSeconds": "A String", # Optional. Not supported. Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. If set to zero, the system will never attempt to terminate the job based on time. Otherwise, the value must be positive integer. +optional @@ -1349,299 +1049,199 @@

Method Details
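The hunk that follows rewrites the container-level fields of the instance spec (args, env, probes, ports). As a hedged sketch of how the retained fields compose, a single entry in the containers list of a Job body might look like this; the image, variable names, and secret name are hypothetical:

    container = {
        # Only Container Registry / Artifact Registry images are supported.
        "image": "us-docker.pkg.dev/my-project/my-repo/worker:latest",
        # $(VAR_NAME) references are expanded from the env entries below.
        "args": ["--shard=$(SHARD)"],
        "env": [
            {"name": "SHARD", "value": "0"},
            {
                "name": "API_KEY",
                "valueFrom": {
                    "secretKeyRef": {
                        "key": "latest",  # secret version: 'latest' or a version number
                        "name": "api-key",  # hypothetical Secret Manager secret name
                    },
                },
            },
        ],
    }

This fragment would sit inside the instance template's containers list of the Job body sketched earlier.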

"activeDeadlineSeconds": "A String", # Optional. Optional duration in seconds the instance may be active relative to StartTime before the system will actively try to mark it failed and kill associated containers. If set to zero, the system will never attempt to kill an instance based on time. Otherwise, value must be a positive integer. +optional "containers": [ # Optional. List of containers belonging to the instance. We disallow a number of fields on this Container. Only a single container may be provided. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments may be supplied by the system to the container at runtime. - "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "args": [ # (Optional) Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell "A String", ], - "command": [ # Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "command": [ "A String", ], - "env": [ # List of environment variables to set in the container. Cannot be updated. +optional + "env": [ # (Optional) List of environment variables to set in the container. { # EnvVar represents an environment variable present in a Container. "name": "A String", # Name of the environment variable. Must be a C_IDENTIFIER. - "value": "A String", # Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". +optional - "valueFrom": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported EnvVarSource represents a source for the value of an EnvVar. 
# Cloud Run fully managed: supported Source for the environment variable's value. Only supports secret_key_ref. Cloud Run for Anthos: supported Source for the environment variable's value. Cannot be used if value is not empty. +optional - "configMapKeyRef": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key from a ConfigMap. # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key of a ConfigMap. +optional - "key": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The key to select. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "value": "A String", # (Optional) Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + "valueFrom": { # EnvVarSource represents a source for the value of an EnvVar. # (Optional) Source for the environment variable's value. Only supports secret_key_ref. Source for the environment variable's value. Cannot be used if value is not empty. + "configMapKeyRef": { # Not supported by Cloud Run Selects a key from a ConfigMap. # (Optional) Not supported by Cloud Run Selects a key of a ConfigMap. + "key": "A String", # The key to select. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the ConfigMap or its key must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap or its key must be defined }, - "secretKeyRef": { # Cloud Run fully managed: supported Cloud Run on GKE: supported SecretKeySelector selects a key of a Secret. # Cloud Run fully managed: supported. Selects a key (version) of a secret in Secret Manager. Cloud Run for Anthos: supported. Selects a key of a secret in the pod's namespace. +optional - "key": "A String", # Cloud Run fully managed: supported A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. Cloud Run for Anthos: supported The key of the secret to select from. Must be a valid secret key. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
# This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "secretKeyRef": { # SecretKeySelector selects a key of a Secret. # (Optional) Selects a key (version) of a secret in Secret Manager. + "key": "A String", # A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. The key of the secret to select from. Must be a valid secret key. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported The name of the secret in the pod's namespace to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the Secret or its key must be defined +optional + "name": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. The name of the secret in the pod's namespace to select from. + "optional": True or False, # (Optional) Specify whether the Secret or its key must be defined }, }, }, ], - "envFrom": [ # List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. +optional - { # EnvFromSource represents the source of a set of ConfigMaps - "configMapRef": { # ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # The ConfigMap to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "envFrom": [ # (Optional) List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + { # Not supported by Cloud Run EnvFromSource represents the source of a set of ConfigMaps + "configMapRef": { # Not supported by Cloud Run ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # (Optional) The ConfigMap to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the ConfigMap must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap must be defined }, - "prefix": "A String", # An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. +optional - "secretRef": { # SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # The Secret to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "prefix": "A String", # (Optional) An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + "secretRef": { # Not supported by Cloud Run SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # (Optional) The Secret to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The Secret to select from. 
- "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the Secret must be defined +optional + "name": "A String", # The Secret to select from. + "optional": True or False, # (Optional) Specify whether the Secret must be defined }, }, ], - "image": "A String", # Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - "imagePullPolicy": "A String", # Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images +optional - "lifecycle": { # Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. # Actions that the management system should take in response to container lifecycle events. Cannot be updated. +optional - "postStart": { # Handler defines a specific action that should be taken # PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "image": "A String", # Only supports containers from Google Container Registry or Artifact Registry URL of the Container image. More info: https://kubernetes.io/docs/concepts/containers/images + "imagePullPolicy": "A String", # (Optional) Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + "livenessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], }, - "preStop": { # Handler defines a specific action that should be taken # PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - }, - "livenessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. 
+optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "name": "A String", # Name of the container specified as a DNS_LABEL. Each container must have a unique name (DNS_LABEL). Cannot be updated. - "ports": [ # List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. +optional + "name": "A String", # (Optional) Name of the container specified as a DNS_LABEL. Currently unused in Cloud Run. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names + "ports": [ # (Optional) List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on. { # ContainerPort represents a network port in a single container. - "containerPort": 42, # Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. - "hostIP": "A String", # What host IP to bind the external port to. +optional - "hostPort": 42, # Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. +optional - "name": "A String", # If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. +optional - "protocol": "A String", # Protocol for port. Must be UDP or TCP. Defaults to "TCP". +optional + "containerPort": 42, # (Optional) Port number the container listens on. This must be a valid port number, 0 < x < 65536. + "name": "A String", # (Optional) If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + "protocol": "A String", # (Optional) Protocol for port. Must be "TCP". Defaults to "TCP". }, ], - "readinessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. 
# Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "readinessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. + }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "resources": { # ResourceRequirements describes the compute resource requirements. # Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +optional - "limits": { # Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "resources": { # ResourceRequirements describes the compute resource requirements. # (Optional) Compute Resources required by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + "limits": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', and '4'. Setting 4 CPU requires at least 2Gi of memory. Limits describes the maximum amount of compute resources allowed. The values of the map are the string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "limitsInMap": { # Limits describes the maximum amount of compute resources allowed. This is a temporary field created to migrate away from the map limits field. This is done to become compliant with k8s style API. This field is deprecated in favor of limits field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, - "requests": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "requests": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1' and '2'. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map are the string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "requestsInMap": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. This is a temporary field created to migrate away from the map requests field. This is done to become compliant with k8s style API. This field is deprecated in favor of requests field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB".
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, }, - "securityContext": { # SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +optional - "allowPrivilegeEscalation": True or False, # AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN +optional - "capabilities": { # Adds and removes POSIX capabilities from running containers. # The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. +optional - "add": [ # Added capabilities +optional + "securityContext": { # Not supported by Cloud Run SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # (Optional) Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + "runAsUser": 42, # (Optional) The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + }, + "startupProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. "A String", ], - "drop": [ # Removed capabilities +optional - "A String", + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
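For illustration, a minimal container dict that respects the ports and resources constraints documented above (a sketch only, with a hypothetical image name, not an official sample). The CPU limit must be '1', '2', or '4'; a 4-CPU limit requires at least 2Gi of memory; and the port name "h2c" opts into HTTP/2, with "http1" as the default:

    container = {
        "image": "gcr.io/my-project/worker:latest",  # hypothetical image
        "ports": [
            # Only a single port may be specified; if omitted, a port is chosen
            # and passed to the container via the PORT environment variable.
            {"name": "h2c", "containerPort": 8080},
        ],
        "resources": {
            # Only memory and CPU are supported as keys.
            "limits": {"cpu": "4", "memory": "2Gi"},  # 4 CPU needs >= 2Gi memory
        },
    }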
+ "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value + }, ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "privileged": True or False, # Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. +optional - "readOnlyRootFilesystem": True or False, # Whether this container has a read-only root filesystem. Default is false. +optional - "runAsGroup": 42, # The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsNonRoot": True or False, # Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsUser": 42, # The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "seLinuxOptions": { # SELinuxOptions are the labels to be applied to the container # The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "level": "A String", # Level is SELinux level label that applies to the container. +optional - "role": "A String", # Role is a SELinux role label that applies to the container. +optional - "type": "A String", # Type is a SELinux type label that applies to the container. +optional - "user": "A String", # User is a SELinux user label that applies to the container. +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. 
+ "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "stdin": True or False, # Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. +optional - "stdinOnce": True or False, # Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false +optional - "terminationMessagePath": "A String", # Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. +optional - "terminationMessagePolicy": "A String", # Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. +optional - "tty": True or False, # Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. +optional - "volumeDevices": [ # volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future. +optional - { # volumeDevice describes a mapping of a raw block device within a container. - "devicePath": "A String", # devicePath is the path inside of the container that the device will be mapped to. - "name": "A String", # name must match the name of a persistentVolumeClaim in the pod - }, - ], - "volumeMounts": [ # Pod volumes to mount into the container's filesystem. Cannot be updated. +optional - { # VolumeMount describes a mounting of a Volume within a container. 
+ "terminationMessagePath": "A String", # (Optional) Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. + "terminationMessagePolicy": "A String", # (Optional) Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + "volumeMounts": [ # (Optional) Volume to mount into the container's filesystem. Only supports SecretVolumeSources. Pod volumes to mount into the container's filesystem. + { # Not supported by Cloud Run VolumeMount describes a mounting of a Volume within a container. "mountPath": "A String", # Path within the container at which the volume should be mounted. Must not contain ':'. - "mountPropagation": "A String", # mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is beta in 1.10. +optional "name": "A String", # This must match the Name of a Volume. - "readOnly": True or False, # Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. +optional - "subPath": "A String", # Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). +optional + "readOnly": True or False, # (Optional) Only true is accepted. Defaults to true. + "subPath": "A String", # (Optional) Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). }, ], - "workingDir": "A String", # Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. +optional + "workingDir": "A String", # (Optional) Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. }, ], "restartPolicy": "A String", # Optional. Restart policy for all containers within the instance. Allowed values are: - OnFailure: Instances will always be restarted on failure if the backoffLimit has not been reached. - Never: Instances are never restarted and all failures are permanent. Cannot be used if backoffLimit is set. +optional "serviceAccountName": "A String", # Optional. Email address of the IAM service account associated with the instance of a Job. The service account represents the identity of the running instance, and determines what permissions the instance has. If not provided, the instance will use the project's default service account. +optional "terminationGracePeriodSeconds": "A String", # Optional. Optional duration in seconds the instance needs to terminate gracefully. Value must be non-negative integer. The value zero indicates delete immediately. 
The grace period is the duration in seconds after the processes running in the instance are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. +optional "volumes": [ # Optional. List of volumes that can be mounted by containers belonging to the instance. More info: https://kubernetes.io/docs/concepts/storage/volumes +optional - { # Volume represents a named volume in a container. - "configMap": { # Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + { # Not supported by Cloud Run Volume represents a named volume in a container. + "configMap": { # Not supported by Cloud Run Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "items": [ # (Optional) If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. 
Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], "name": "A String", # Name of the config. - "optional": True or False, # Specify whether the Secret or its keys must be defined. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. }, "name": "A String", # Volume's name. - "secret": { # The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # Cloud Run fully managed: supported If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. Cloud Run for Anthos: supported If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + "secret": { # The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret_name. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0000 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. NOTE: This is an integer representation of the mode bits. So, the integer value should look exactly as the chmod numeric notation, i.e. Unix chmod "777" (a=rwx) should have the integer value 777. + "items": [ # (Optional) If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], - "optional": True or False, # Specify whether the Secret or its keys must be defined. - "secretName": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported Name of the secret in the container's namespace to use. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. + "secretName": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Name of the secret in the container's namespace to use. }, }, ], diff --git a/docs/dyn/slides_v1.presentations.html b/docs/dyn/slides_v1.presentations.html index d9b1e8fd411..b30cbbfdce8 100644 --- a/docs/dyn/slides_v1.presentations.html +++ b/docs/dyn/slides_v1.presentations.html @@ -2031,6 +2031,7 @@
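Rounding out the Cloud Run schema above before the Slides changes below: a secret volume and its matching volumeMount might look like the following sketch (names are hypothetical). Per the field comments, only SecretVolumeSources are supported for mounts, and readOnly must be true:

    volume = {
        "name": "secret-vol",  # hypothetical volume name
        "secret": {
            "secretName": "my-secret",  # Cloud Secret Manager secret, same project
            "items": [
                # key is the secret version ('latest' or a version number);
                # path is the file name exposed inside the volume.
                {"key": "latest", "path": "credentials.json"},
            ],
        },
    }
    volume_mount = {
        "name": "secret-vol",  # must match the volume's name
        "mountPath": "/var/secrets",
        "readOnly": True,  # only true is accepted
    }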

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -3041,6 +3042,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -4049,6 +4051,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -5069,6 +5072,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -6090,6 +6094,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -7100,6 +7105,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -8108,6 +8114,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -9128,6 +9135,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -10156,6 +10164,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -11166,6 +11175,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -12174,6 +12184,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -13194,6 +13205,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. diff --git a/docs/dyn/slides_v1.presentations.pages.html b/docs/dyn/slides_v1.presentations.pages.html index e594c79e1f7..89a500b6e26 100644 --- a/docs/dyn/slides_v1.presentations.pages.html +++ b/docs/dyn/slides_v1.presentations.pages.html @@ -1106,6 +1106,7 @@

Method Details

"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. diff --git a/docs/dyn/spanner_v1.projects.instances.databases.html b/docs/dyn/spanner_v1.projects.instances.databases.html index aa4b0dc431d..a3a1780ca3c 100644 --- a/docs/dyn/spanner_v1.projects.instances.databases.html +++ b/docs/dyn/spanner_v1.projects.instances.databases.html @@ -455,6 +455,7 @@

Method Details

"token": "A String", # The token identifying the message, e.g. 'METRIC_READ_CPU'. This should be unique within the service. }, "startKeyIndex": 42, # The index of the start key in indexed_keys. + "timeOffset": "A String", # The time offset. This is the time since the start of the time interval. "unit": { # A message representing a user-facing string whose value may need to be translated before being displayed. # The unit of the metric. This is an unstructured field and will be mapped as is to the user. "args": { # A map of arguments used when creating the localized message. Keys represent parameter names which may be used by the localized version when substituting dynamic values. "a_key": "A String", diff --git a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html index 1cbdde8390f..a3e75975498 100644 --- a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html +++ b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html @@ -179,7 +179,7 @@

Method Details

The object takes the form of: { # The request for BeginTransaction. - "options": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. 
The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. 
The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. 
Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Required. Options for the new transaction. + "options": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. 
Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). 
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. 
This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Required. Options for the new transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. 
}, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -310,7 +310,7 @@

Method Details

"transactionTag": "A String", # A tag used for statistics collection about this transaction. Both request_tag and transaction_tag can be specified for a read or query that belongs to a transaction. The value of transaction_tag should be the same for all requests belonging to the same transaction. If this request doesn’t belong to any transaction, transaction_tag will be ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a transaction_tag is limited to 50 characters. Values that exceed this limit are truncated. }, "returnCommitStats": True or False, # If `true`, then statistics related to the transaction will be included in the CommitResponse. Default value is `false`. - "singleUseTransaction": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. 
Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. 
If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. 
These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute mutations in a temporary transaction. Note that unlike commit of a previously-started transaction, commit with a temporary transaction is non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once (for instance, due to retries in the application, or in the transport library), it is possible that the mutations are executed more than once. If this is undesirable, use BeginTransaction and Commit instead. + "singleUseTransaction": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. 
This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. 
If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. 
- Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute mutations in a temporary transaction. Note that unlike commit of a previously-started transaction, commit with a temporary transaction is non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once (for instance, due to retries in the application, or in the transport library), it is possible that the mutations are executed more than once. If this is undesirable, use BeginTransaction and Commit instead. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -421,14 +421,7 @@
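To make the singleUseTransaction caveat concrete, here is a hedged sketch of a commit in a temporary read-write transaction; `creds`, SESSION, the Singers table, and its columns are hypothetical, and the request is deliberately not retried blindly because this form is non-idempotent.

from googleapiclient.discovery import build

# Hypothetical inputs: `creds` is an authorized credentials object; SESSION is
# a full session resource name. The Singers table is illustrative only.
spanner = build("spanner", "v1", credentials=creds)
resp = spanner.projects().instances().databases().sessions().commit(
    session=SESSION,
    body={
        # Temporary transaction: saves a BeginTransaction round trip, but a
        # retried CommitRequest may apply the mutations more than once.
        "singleUseTransaction": {"readWrite": {}},
        "mutations": [{
            "insertOrUpdate": {
                "table": "Singers",
                "columns": ["SingerId", "FirstName"],
                "values": [["1", "Marc"]],  # INT64 values travel as strings
            }
        }],
        "returnCommitStats": True,
    },
).execute()
print(resp.get("commitTimestamp"), resp.get("commitStats"))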

Method Details

"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. "code": "A String", # Required. The TypeCode for this type. - "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type` provides type information for the struct's fields. - "fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. - { # Message representing a single field of a struct. - "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. - }, - ], - }, + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. }, }, "params": { # Parameter names and values that bind to placeholders in the DML string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names can contain letters, numbers, and underscores. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `"WHERE id > @msg_id AND id < @msg_id + 100"` It is an error to execute a SQL statement with unbound parameters. @@ -438,7 +431,7 @@

Method Details

}, ], "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Required. The transaction to use. Must be a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. 
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. 
Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. 
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. 
For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. 
Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. 
However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -453,7 +446,7 @@
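The retry guidance above (retry `ABORTED` in the same session, cap total wall time rather than attempt count) might look like the following hedged sketch, reusing the hypothetical `sessions` collection and `session_name` from the earlier example. The `run_txn` callable is an illustrative assumption standing in for the application's reads, DML, and buffered mutations:

```python
# A sketch, not a definitive implementation: retry ABORTED commits in the same
# session, bounding total wall time spent retrying instead of retry count.
import time
from googleapiclient.errors import HttpError

def commit_with_retry(sessions, session_name, run_txn, budget_s=60.0):
    deadline = time.monotonic() + budget_s
    while True:
        # Begin a fresh read-write transaction for each attempt.
        txn = sessions.beginTransaction(
            session=session_name, body={"options": {"readWrite": {}}}
        ).execute()
        try:
            mutations = run_txn(txn["id"])  # reads/DML plus buffered mutations
            return sessions.commit(
                session=session_name,
                body={"transactionId": txn["id"], "mutations": mutations},
            ).execute()
        except HttpError as e:
            # The REST surface reports gRPC ABORTED as HTTP 409.
            if e.resp.status != 409 or time.monotonic() >= deadline:
                raise
```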

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -486,7 +479,11 @@
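The three timestamp bounds described above map directly onto the `singleUse.readOnly` selector. A sketch only, reusing the hypothetical `sessions` and `session_name` from the first example; the table name is illustrative:

```python
# Single-use, bounded-staleness read-only query. Spanner picks the freshest
# timestamp within the bound that the closest replica can serve without
# blocking, per the "Bounded Staleness" discussion above.
body = {
    "sql": "SELECT id, name FROM Singers",  # illustrative table
    "transaction": {"singleUse": {"readOnly": {
        "maxStaleness": "15s",        # bounded staleness
        # "exactStaleness": "10s",    # or: exact staleness
        # "strong": True,             # or: strong reads (the default)
        "returnReadTimestamp": True,  # report the timestamp actually chosen
    }}},
}
result = sessions.executeSql(session=session_name, body=body).execute()
```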

Method Details

"fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. { # Message representing a single field of a struct. "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. + "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. # The type of the field. + "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. + "code": "A String", # Required. The TypeCode for this type. + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. + }, }, ], }, @@ -563,14 +560,7 @@

Method Details

"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. "code": "A String", # Required. The TypeCode for this type. - "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type` provides type information for the struct's fields. - "fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. - { # Message representing a single field of a struct. - "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. - }, - ], - }, + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. }, }, "params": { # Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names must conform to the naming requirements of identifiers as specified at https://cloud.google.com/spanner/docs/lexical#identifiers. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `"WHERE id > @msg_id AND id < @msg_id + 100"` It is an error to execute a SQL statement with unbound parameters. @@ -591,7 +581,7 @@

Method Details

"seqno": "A String", # A per-transaction sequence number used to identify this request. This field makes each request idempotent such that if the request is received multiple times, at most one will succeed. The sequence number must be monotonically increasing within the transaction. If a request arrives for the first time with an out-of-order sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution. Required for DML statements. Ignored for queries. "sql": "A String", # Required. The SQL string. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. For queries, if none is provided, the default is a temporary read-only transaction with strong concurrency. Standard DML statements require a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing Partitioned DML transaction ID. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. 
Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. 
## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. 
These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. 
Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. 
Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. 
- The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -606,7 +596,7 @@
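A minimal sketch of how the `begin` selector documented above might be used from this client: it starts a locking read-write transaction inline with an executeSql call and supplies a `seqno` so the DML statement is idempotent. The `service` object, `session_name`, and the table and column names are assumptions for illustration, not values from this document.

  from googleapiclient import discovery

  service = discovery.build("spanner", "v1")  # uses application default credentials
  session_name = "projects/p/instances/i/databases/d/sessions/s"  # hypothetical session

  body = {
      "sql": "UPDATE Singers SET Status = @status WHERE SingerId = @id",
      "params": {"status": "active", "id": "42"},  # INT64 values travel as JSON strings
      "paramTypes": {"status": {"code": "STRING"}, "id": {"code": "INT64"}},
      "transaction": {"begin": {"readWrite": {}}},  # begin a read-write transaction with this request
      "seqno": "1",  # per-transaction sequence number; required for DML, ignored for queries
  }
  response = (service.projects().instances().databases().sessions()
              .executeSql(session=session_name, body=body).execute())
  # The new transaction's id comes back in ResultSetMetadata.transaction,
  # for use in subsequent requests and the eventual Commit or Rollback.
  txn_id = response["metadata"]["transaction"]["id"]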

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -637,7 +627,11 @@
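A companion sketch for the `singleUse` selector: run one query in a temporary strong read-only transaction, which the text above calls the most efficient path for a single SQL query. It reuses the hypothetical `service` and `session_name` from the previous sketch; swapping the bound for `{"exactStaleness": "15s"}` or `{"maxStaleness": "10s"}` would trade freshness for latency as described under the staleness headings.

  body = {
      "sql": "SELECT SingerId, FirstName FROM Singers",
      # Temporary single-use read-only transaction; "strong" is the default bound.
      "transaction": {"singleUse": {"readOnly": {"strong": True}}},
  }
  result = (service.projects().instances().databases().sessions()
            .executeSql(session=session_name, body=body).execute())
  for row in result.get("rows", []):
      print(row)  # each row is a list ordered like metadata.rowType.fields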

Method Details

"fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. { # Message representing a single field of a struct. "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. + "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. # The type of the field. + "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. + "code": "A String", # Required. The TypeCode for this type. + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. + }, }, ], }, @@ -703,14 +697,7 @@

Method Details

"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. "code": "A String", # Required. The TypeCode for this type. - "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type` provides type information for the struct's fields. - "fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. - { # Message representing a single field of a struct. - "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. - }, - ], - }, + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. }, }, "params": { # Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names must conform to the naming requirements of identifiers as specified at https://cloud.google.com/spanner/docs/lexical#identifiers. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `"WHERE id > @msg_id AND id < @msg_id + 100"` It is an error to execute a SQL statement with unbound parameters. @@ -731,7 +718,7 @@

Method Details

"seqno": "A String", # A per-transaction sequence number used to identify this request. This field makes each request idempotent such that if the request is received multiple times, at most one will succeed. The sequence number must be monotonically increasing within the transaction. If a request arrives for the first time with an out-of-order sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution. Required for DML statements. Ignored for queries. "sql": "A String", # Required. The SQL string. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. For queries, if none is provided, the default is a temporary read-only transaction with strong concurrency. Standard DML statements require a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing Partitioned DML transaction ID. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. 
Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. 
## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner.
These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. 
Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound.
Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows.
- The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -746,7 +733,7 @@
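To make the Partitioned DML contract above concrete, here is a minimal sketch of the begin-then-execute flow over this REST surface, assuming an idempotent statement (re-deleting an already-deleted row is a no-op) and made-up session and table names; note there is no Commit or Rollback step.

```python
from googleapiclient import discovery

spanner = discovery.build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()
session = "projects/p/instances/i/databases/d/sessions/s"  # hypothetical name

# Begin a Partitioned DML transaction (requires the
# spanner.databases.beginPartitionedDmlTransaction permission).
txn = sessions.beginTransaction(
    session=session, body={"options": {"partitionedDml": {}}}).execute()

# Exactly one fully-partitionable, idempotent DML statement per transaction.
resp = sessions.executeSql(
    session=session,
    body={
        "transaction": {"id": txn["id"]},
        "sql": "DELETE FROM Events WHERE Expired = TRUE",  # table is made up
        "seqno": "1",  # required for DML requests
    },
).execute()

# Partitioned DML reports a lower bound, since partitions run independently
# and a partition's statement may be applied more than once.
print(resp["stats"]["rowCountLowerBound"])
```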

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp.
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable.
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions.
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort.
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica.
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all.
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -778,7 +765,11 @@
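Since `singleUse` is described above as the most efficient way to run a transaction consisting of a single query, a minimal sketch of that path may help; the session, table, and column names are made up.

```python
from googleapiclient import discovery

spanner = discovery.build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()
session = "projects/p/instances/i/databases/d/sessions/s"  # hypothetical name

# A temporary, strong, read-only transaction: opened and closed by the
# ExecuteSql call itself, so no beginTransaction/commit round trips.
result = sessions.executeSql(
    session=session,
    body={
        "transaction": {"singleUse": {"readOnly": {"strong": True}}},
        "sql": "SELECT AlbumId, AlbumTitle FROM Albums",  # made-up schema
    },
).execute()

# Each row is a list whose order matches metadata.rowType.fields.
for row in result.get("rows", []):
    print(row)
```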

Method Details

"fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. { # Message representing a single field of a struct. "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. + "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. # The type of the field. + "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. + "code": "A String", # Required. The TypeCode for this type. + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. + }, }, ], }, @@ -913,14 +904,7 @@

Method Details

"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. "code": "A String", # Required. The TypeCode for this type. - "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type` provides type information for the struct's fields. - "fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. - { # Message representing a single field of a struct. - "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. - }, - ], - }, + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. }, }, "params": { # Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names can contain letters, numbers, and underscores. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `"WHERE id > @msg_id AND id < @msg_id + 100"` It is an error to execute a SQL statement with unbound parameters. @@ -932,7 +916,7 @@

Method Details

}, "sql": "A String", # Required. The query request to generate partitions for. The request will fail if the query is not root partitionable. The query plan of a root partitionable query has a single distributed union operator. A distributed union operator conceptually divides one or more tables into multiple splits, remotely evaluates a subquery independently on each split, and then unions all results. This must not contain DML commands, such as INSERT, UPDATE, or DELETE. Use ExecuteStreamingSql with a PartitionedDml transaction for large, partition-friendly DML operations. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Read only snapshot transactions are supported, read/write and single use transactions are not. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. 
## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction.
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions.
Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past.
Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. 
Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results.
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as those in an OLTP workload, should use ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql.
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -947,7 +931,7 @@
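As an illustrative aside (not part of the generated reference), here is a minimal, hedged sketch of the Partitioned DML flow described above, using the discovery-based Python client this file documents; the project, instance, database, table, and DELETE statement are hypothetical placeholders, not values taken from this page.
<pre>
# Hedged sketch, assuming hypothetical resource names: begin a Partitioned DML
# transaction, then run one fully partitionable, idempotent DML statement.
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()

database = "projects/my-project/instances/my-instance/databases/my-db"  # hypothetical
session = sessions.create(database=database, body={}).execute()

# Requires spanner.databases.beginPartitionedDmlTransaction on the session.
txn = sessions.beginTransaction(
    session=session["name"],
    body={"options": {"partitionedDml": {}}},
).execute()

# The statement may be applied more than once to some partitions, so it
# should be idempotent; there is no Commit or Rollback for this mode.
sessions.executeSql(
    session=session["name"],
    body={
        "transaction": {"id": txn["id"]},
        "sql": "DELETE FROM Albums WHERE MarketingBudget = 0",  # hypothetical
        "seqno": "1",  # per-transaction sequence number, required for DML
    },
).execute()
</pre>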

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp.
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica.
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as those in an OLTP workload, should use ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all.
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1029,7 +1013,7 @@
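In the same hedged spirit, and reusing the hypothetical `sessions` collection and `session` from the sketch above, the `singleUse` selector just described can carry any one timestamp bound, for example a 15-second bounded-staleness read:
<pre>
# Hedged sketch: a temporary single-use read-only transaction, the pattern the
# reference above calls the most efficient for a single SQL query. The table
# name and the 15s staleness bound are illustrative choices, not values from
# this file.
body = {
    "sql": "SELECT SingerId, FirstName FROM Singers",  # hypothetical table
    "transaction": {
        "singleUse": {
            "readOnly": {
                # Exactly one timestamp bound may be set; alternatives include
                # {"strong": True} or {"exactStaleness": "10s"}.
                "maxStaleness": "15s",
                "returnReadTimestamp": True,
            }
        }
    },
}
result = sessions.executeSql(session=session["name"], body=body).execute()
for row in result.get("rows", []):
    print(row)
</pre>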

Method Details
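The "Retrying Aborted Transactions" advice that the transaction options on this page repeat (retry in the same session, and cap total wall time rather than the attempt count) might be implemented along the lines of the following hedged sketch; the `work` callback is hypothetical, and it assumes `ABORTED` surfaces as HTTP 409 through the REST mapping.
<pre>
# Hedged sketch of a wall-time-capped retry loop for read-write transactions.
import time

from googleapiclient.errors import HttpError

def commit_with_retries(sessions, session_name, work, max_wall_time_s=60):
    """Retries the whole transaction in the same session until it commits.

    `work` is a hypothetical callback that performs the transaction's reads
    and DML given a transaction id, and returns the mutations to commit.
    """
    start = time.monotonic()
    while True:
        txn = sessions.beginTransaction(
            session=session_name, body={"options": {"readWrite": {}}}
        ).execute()
        try:
            mutations = work(txn["id"])
            return sessions.commit(
                session=session_name,
                body={"transactionId": txn["id"], "mutations": mutations},
            ).execute()
        except HttpError as err:
            # Anything other than ABORTED (HTTP 409), or an exhausted
            # wall-time budget, is re-raised to the caller.
            if err.resp.status != 409 or time.monotonic() - start > max_wall_time_s:
                raise
</pre>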

}, "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Read only snapshot transactions are supported, read/write and single use transactions are not. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. 
## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. 
Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. 
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. 
For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. 
Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. 
Bounded staleness reads are, however, typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as those in an OLTP workload, should use ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all.
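A short sketch of this execution strategy, assuming the google-cloud-spanner library and a hypothetical Events table. The DELETE is idempotent, as the caveats above recommend, since a partition may have the statement applied more than once:

```python
from google.cloud import spanner

database = spanner.Client().instance("my-instance").database("my-database")

# Runs as Partitioned DML: per-partition internal transactions that commit
# automatically and independently; no Commit/Rollback call is made.
row_count = database.execute_partitioned_dml(
    "DELETE FROM Events WHERE CreatedAt < "
    "TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 30 DAY)"
)
# The count is a lower bound, since some partitions may be attempted twice.
print("rows deleted (lower bound):", row_count)
```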
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1044,7 +1028,7 @@

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort.
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica.
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as those in an OLTP workload, should use ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all.
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1130,7 +1114,7 @@
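On the wire, the `singleUse` selector documented above is simply a field of the ExecuteSql request body. A sketch using the discovery-based client this reference is generated for; the resource names are placeholders, and authentication, error handling, and session cleanup are omitted:

```python
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()

database = "projects/my-project/instances/my-instance/databases/my-database"
session = sessions.create(database=database, body={}).execute()

result = sessions.executeSql(
    session=session["name"],
    body={
        "sql": "SELECT 1",
        # Temporary transaction: read-only, strong, used exactly once.
        "transaction": {"singleUse": {"readOnly": {"strong": True}}},
    },
).execute()
```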

Method Details

"resumeToken": "A String", # If this request is resuming a previously interrupted read, `resume_token` should be copied from the last PartialResultSet yielded before the interruption. Doing this enables the new read to resume where the last read left off. The rest of the request parameters must exactly match the request that yielded this token. "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. 
If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. 
If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. 
These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. 
Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads.
However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read.
In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as those in an OLTP workload, should use ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql.
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1145,7 +1129,7 @@
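The `begin` selector documented above pairs with the `id` selector: the first request begins the transaction and returns its ID in ResultSetMetadata.transaction, and later requests (and the final Commit) reference that ID. A sketch with placeholder names, assuming the discovery-based client:

```python
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()
session = "projects/p/instances/i/databases/d/sessions/s"  # existing session

first = sessions.executeSql(
    session=session,
    body={"sql": "SELECT 1", "transaction": {"begin": {"readWrite": {}}}},
).execute()
# The new transaction's id comes back in ResultSetMetadata.transaction.
txn_id = first["metadata"]["transaction"]["id"]

# Subsequent statements run in the previously-started transaction.
sessions.executeSql(
    session=session,
    body={"sql": "SELECT 2", "transaction": {"id": txn_id}},
).execute()

# Unlike singleUse, a begun read-write transaction must be committed.
sessions.commit(session=session, body={"transactionId": txn_id}).execute()
```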

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1176,7 +1160,11 @@
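The retry guidance above (retry in the same session, and bound total wall time rather than the number of attempts) can be sketched as follows. `commit_with_retries`, `run_in_txn`, and `deadline_seconds` are illustrative names, not part of the API; through this REST client, `ABORTED` surfaces as an HTTP 409 error.

```python
import time

from googleapiclient.errors import HttpError

def commit_with_retries(sessions, session, run_in_txn, deadline_seconds=60):
    """Retry a read-write transaction on ABORTED, limiting the total wall
    time spent retrying instead of capping the number of attempts."""
    start = time.monotonic()
    while True:
        txn = sessions.beginTransaction(
            session=session, body={"options": {"readWrite": {}}}
        ).execute()
        try:
            # run_in_txn performs the transaction's reads/SQL and returns the
            # mutations to apply atomically at commit time.
            mutations = run_in_txn(txn["id"])
            return sessions.commit(
                session=session,
                body={"transactionId": txn["id"], "mutations": mutations},
            ).execute()
        except HttpError as err:
            # Retrying in the same session raises the transaction's lock
            # priority, so each attempt is slightly more likely to succeed.
            if err.resp.status != 409 or time.monotonic() - start > deadline_seconds:
                raise
```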

Method Details

"fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. { # Message representing a single field of a struct. "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. + "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. # The type of the field. + "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. + "code": "A String", # Required. The TypeCode for this type. + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. + }, }, ], }, @@ -1301,7 +1289,7 @@

Method Details

"resumeToken": "A String", # If this request is resuming a previously interrupted read, `resume_token` should be copied from the last PartialResultSet yielded before the interruption. Doing this enables the new read to resume where the last read left off. The rest of the request parameters must exactly match the request that yielded this token. "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. 
If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. 
If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. 
These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. 
Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. 
However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. 
In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1316,7 +1304,7 @@
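To make the Partitioned DML constraints above concrete, here is a hedged sketch of running one idempotent statement in its own Partitioned DML transaction. The session path, the `Events` table, its `CreatedAt` column, and the cutoff value are all hypothetical.

```python
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()
session = "projects/my-project/instances/my-instance/databases/my-db/sessions/my-session"

# Partitioned DML runs in its own transaction and may contain exactly one DML
# statement, which should be idempotent because it can be applied more than
# once to some partitions.
txn = sessions.beginTransaction(
    session=session, body={"options": {"partitionedDml": {}}}
).execute()

resp = sessions.executeSql(
    session=session,
    body={
        "sql": "DELETE FROM Events WHERE CreatedAt < @cutoff",
        "params": {"cutoff": "2020-01-01T00:00:00Z"},
        "paramTypes": {"cutoff": {"code": "TIMESTAMP"}},
        "transaction": {"id": txn["id"]},
    },
).execute()

# Execution is at-least-once per partition, so only a lower bound on the
# number of modified rows is reported.
rows_deleted = resp["stats"]["rowCountLowerBound"]
```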

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
## Old Read Timestamps and Garbage Collection

Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`.

## Partitioned DML Transactions

Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions.

- The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table.
- The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows.
- Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows.
- The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows.
- Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql.
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all.

Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table.
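A sketch of the Partitioned DML flow under the rules above: begin a transaction with `partitionedDml` options, then issue exactly one idempotent, fully-partitionable statement. The session name, table, and predicate are hypothetical:

```
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
session = "projects/p/instances/i/databases/d/sessions/s"  # placeholder name
sessions = spanner.projects().instances().databases().sessions()

# Begin a Partitioned DML transaction; there is no Commit or Rollback afterwards.
txn = sessions.beginTransaction(
    session=session, body={"options": {"partitionedDml": {}}}
).execute()

# Exactly one idempotent, fully-partitionable DML statement per transaction.
sessions.executeSql(
    session=session,
    body={
        "transaction": {"id": txn["id"]},
        "sql": "DELETE FROM Events WHERE CreatedAt < '2020-01-01'",  # hypothetical table
        "seqno": "1",  # sequence number required for DML statements
    },
).execute()
```

A delete with a fixed predicate like this one is naturally idempotent, so at-least-once execution per partition is harmless.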
# Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query.
+ "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort.
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica.
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all.
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1348,7 +1336,11 @@

Method Details

"fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. { # Message representing a single field of a struct. "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. + "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. # The type of the field. + "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. + "code": "A String", # Required. The TypeCode for this type. + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. + }, }, ], }, diff --git a/docs/dyn/spanner_v1.scans.html b/docs/dyn/spanner_v1.scans.html index 61d0572cc50..ac8eee55218 100644 --- a/docs/dyn/spanner_v1.scans.html +++ b/docs/dyn/spanner_v1.scans.html @@ -239,6 +239,7 @@
diff --git a/docs/dyn/spanner_v1.scans.html b/docs/dyn/spanner_v1.scans.html index 61d0572cc50..ac8eee55218 100644 --- a/docs/dyn/spanner_v1.scans.html +++ b/docs/dyn/spanner_v1.scans.html @@ -239,6 +239,7 @@

Method Details

"token": "A String", # The token identifying the message, e.g. 'METRIC_READ_CPU'. This should be unique within the service. }, "startKeyIndex": 42, # The index of the start key in indexed_keys. + "timeOffset": "A String", # The time offset. This is the time since the start of the time interval. "unit": { # A message representing a user-facing string whose value may need to be translated before being displayed. # The unit of the metric. This is an unstructured field and will be mapped as is to the user. "args": { # A map of arguments used when creating the localized message. Keys represent parameter names which may be used by the localized version when substituting dynamic values. "a_key": "A String", diff --git a/docs/dyn/storagetransfer_v1.googleServiceAccounts.html b/docs/dyn/storagetransfer_v1.googleServiceAccounts.html index cc40f18fdc5..9c70c101d18 100644 --- a/docs/dyn/storagetransfer_v1.googleServiceAccounts.html +++ b/docs/dyn/storagetransfer_v1.googleServiceAccounts.html @@ -102,6 +102,7 @@

Method Details

{ # Google service account "accountEmail": "A String", # Email address of the service account. + "subjectId": "A String", # Unique identifier for the service account. }
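For example, the per-project service account, including the new `subjectId`, can be fetched with `googleServiceAccounts.get`; the project ID below is a placeholder:

```
from googleapiclient.discovery import build

storagetransfer = build("storagetransfer", "v1")
sa = storagetransfer.googleServiceAccounts().get(projectId="my-project").execute()
print(sa["accountEmail"], sa.get("subjectId"))
```

The `subjectId` is the identifier an AWS role's trust policy can match when granting this service account permission to call `AssumeRoleWithWebIdentity` (see the `roleArn` field in the transfer-job schema below).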
diff --git a/docs/dyn/storagetransfer_v1.transferJobs.html b/docs/dyn/storagetransfer_v1.transferJobs.html index 04e029657dc..06592c9d381 100644 --- a/docs/dyn/storagetransfer_v1.transferJobs.html +++ b/docs/dyn/storagetransfer_v1.transferJobs.html @@ -158,10 +158,11 @@

Method Details

}, "bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. + "roleArn": "A String", # Input only. Role arn to support temporary credentials via AssumeRoleWithWebIdentity. When role arn is provided, transfer service will fetch temporary credentials for the session using AssumeRoleWithWebIdentity call for the provided role using the [GoogleServiceAccount] for this project. }, "azureBlobStorageDataSource": { # An AzureBlobStorageData resource can be a data source, but not a data sink. An AzureBlobStorageData resource represents one Azure container. The storage account determines the [Azure endpoint](https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account#storage-account-endpoints). In an AzureBlobStorageData resource, a blobs's name is the [Azure Blob Storage blob's key name](https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#blob-names). # An Azure Blob Storage data source. "azureCredentials": { # Azure credentials For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). # Required. Input only. Credentials used to authenticate API requests to Azure. For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). - "sasToken": "A String", # Required. Azure shared access signature. (see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview)). + "sasToken": "A String", # Required. Azure shared access signature (SAS). *Note:*Copying data from Azure Data Lake Storage (ADLS) Gen 2 is in [Preview](/products/#product-launch-stages). During Preview, if you are copying data from ADLS Gen 2, you must use an account SAS. For more information about SAS, see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview). }, "container": "A String", # Required. The container to transfer from the Azure Storage account. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. @@ -255,10 +256,11 @@

Method Details

}, "bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. + "roleArn": "A String", # Input only. Role arn to support temporary credentials via AssumeRoleWithWebIdentity. When role arn is provided, transfer service will fetch temporary credentials for the session using AssumeRoleWithWebIdentity call for the provided role using the [GoogleServiceAccount] for this project. }, "azureBlobStorageDataSource": { # An AzureBlobStorageData resource can be a data source, but not a data sink. An AzureBlobStorageData resource represents one Azure container. The storage account determines the [Azure endpoint](https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account#storage-account-endpoints). In an AzureBlobStorageData resource, a blobs's name is the [Azure Blob Storage blob's key name](https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#blob-names). # An Azure Blob Storage data source. "azureCredentials": { # Azure credentials For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). # Required. Input only. Credentials used to authenticate API requests to Azure. For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). - "sasToken": "A String", # Required. Azure shared access signature. (see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview)). + "sasToken": "A String", # Required. Azure shared access signature (SAS). *Note:*Copying data from Azure Data Lake Storage (ADLS) Gen 2 is in [Preview](/products/#product-launch-stages). During Preview, if you are copying data from ADLS Gen 2, you must use an account SAS. For more information about SAS, see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview). }, "container": "A String", # Required. The container to transfer from the Azure Storage account. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. @@ -360,10 +362,11 @@

Method Details

}, "bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. + "roleArn": "A String", # Input only. Role arn to support temporary credentials via AssumeRoleWithWebIdentity. When role arn is provided, transfer service will fetch temporary credentials for the session using AssumeRoleWithWebIdentity call for the provided role using the [GoogleServiceAccount] for this project. }, "azureBlobStorageDataSource": { # An AzureBlobStorageData resource can be a data source, but not a data sink. An AzureBlobStorageData resource represents one Azure container. The storage account determines the [Azure endpoint](https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account#storage-account-endpoints). In an AzureBlobStorageData resource, a blobs's name is the [Azure Blob Storage blob's key name](https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#blob-names). # An Azure Blob Storage data source. "azureCredentials": { # Azure credentials For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). # Required. Input only. Credentials used to authenticate API requests to Azure. For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). - "sasToken": "A String", # Required. Azure shared access signature. (see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview)). + "sasToken": "A String", # Required. Azure shared access signature (SAS). *Note:*Copying data from Azure Data Lake Storage (ADLS) Gen 2 is in [Preview](/products/#product-launch-stages). During Preview, if you are copying data from ADLS Gen 2, you must use an account SAS. For more information about SAS, see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview). }, "container": "A String", # Required. The container to transfer from the Azure Storage account. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. @@ -469,10 +472,11 @@

Method Details

}, "bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. + "roleArn": "A String", # Input only. Role arn to support temporary credentials via AssumeRoleWithWebIdentity. When role arn is provided, transfer service will fetch temporary credentials for the session using AssumeRoleWithWebIdentity call for the provided role using the [GoogleServiceAccount] for this project. }, "azureBlobStorageDataSource": { # An AzureBlobStorageData resource can be a data source, but not a data sink. An AzureBlobStorageData resource represents one Azure container. The storage account determines the [Azure endpoint](https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account#storage-account-endpoints). In an AzureBlobStorageData resource, a blobs's name is the [Azure Blob Storage blob's key name](https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#blob-names). # An Azure Blob Storage data source. "azureCredentials": { # Azure credentials For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). # Required. Input only. Credentials used to authenticate API requests to Azure. For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). - "sasToken": "A String", # Required. Azure shared access signature. (see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview)). + "sasToken": "A String", # Required. Azure shared access signature (SAS). *Note:*Copying data from Azure Data Lake Storage (ADLS) Gen 2 is in [Preview](/products/#product-launch-stages). During Preview, if you are copying data from ADLS Gen 2, you must use an account SAS. For more information about SAS, see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview). }, "container": "A String", # Required. The container to transfer from the Azure Storage account. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. @@ -586,10 +590,11 @@

Method Details

}, "bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. + "roleArn": "A String", # Input only. Role arn to support temporary credentials via AssumeRoleWithWebIdentity. When role arn is provided, transfer service will fetch temporary credentials for the session using AssumeRoleWithWebIdentity call for the provided role using the [GoogleServiceAccount] for this project. }, "azureBlobStorageDataSource": { # An AzureBlobStorageData resource can be a data source, but not a data sink. An AzureBlobStorageData resource represents one Azure container. The storage account determines the [Azure endpoint](https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account#storage-account-endpoints). In an AzureBlobStorageData resource, a blobs's name is the [Azure Blob Storage blob's key name](https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#blob-names). # An Azure Blob Storage data source. "azureCredentials": { # Azure credentials For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). # Required. Input only. Credentials used to authenticate API requests to Azure. For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). - "sasToken": "A String", # Required. Azure shared access signature. (see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview)). + "sasToken": "A String", # Required. Azure shared access signature (SAS). *Note:*Copying data from Azure Data Lake Storage (ADLS) Gen 2 is in [Preview](/products/#product-launch-stages). During Preview, if you are copying data from ADLS Gen 2, you must use an account SAS. For more information about SAS, see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview). }, "container": "A String", # Required. The container to transfer from the Azure Storage account. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. @@ -685,10 +690,11 @@

Method Details

}, "bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. + "roleArn": "A String", # Input only. Role arn to support temporary credentials via AssumeRoleWithWebIdentity. When role arn is provided, transfer service will fetch temporary credentials for the session using AssumeRoleWithWebIdentity call for the provided role using the [GoogleServiceAccount] for this project. }, "azureBlobStorageDataSource": { # An AzureBlobStorageData resource can be a data source, but not a data sink. An AzureBlobStorageData resource represents one Azure container. The storage account determines the [Azure endpoint](https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account#storage-account-endpoints). In an AzureBlobStorageData resource, a blobs's name is the [Azure Blob Storage blob's key name](https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#blob-names). # An Azure Blob Storage data source. "azureCredentials": { # Azure credentials For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). # Required. Input only. Credentials used to authenticate API requests to Azure. For information on our data retention policy for user credentials, see [User credentials](/storage-transfer/docs/data-retention#user-credentials). - "sasToken": "A String", # Required. Azure shared access signature. (see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview)). + "sasToken": "A String", # Required. Azure shared access signature (SAS). *Note:*Copying data from Azure Data Lake Storage (ADLS) Gen 2 is in [Preview](/products/#product-launch-stages). During Preview, if you are copying data from ADLS Gen 2, you must use an account SAS. For more information about SAS, see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview). }, "container": "A String", # Required. The container to transfer from the Azure Storage account. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. diff --git a/docs/dyn/sts_v1.v1.html b/docs/dyn/sts_v1.v1.html index 37f1ebe5244..113c877df0b 100644 --- a/docs/dyn/sts_v1.v1.html +++ b/docs/dyn/sts_v1.v1.html @@ -136,8 +136,8 @@
diff --git a/docs/dyn/sts_v1.v1.html b/docs/dyn/sts_v1.v1.html index 37f1ebe5244..113c877df0b 100644 --- a/docs/dyn/sts_v1.v1.html +++ b/docs/dyn/sts_v1.v1.html @@ -136,8 +136,8 @@

Method Details

"options": "A String", # A set of features that Security Token Service supports, in addition to the standard OAuth 2.0 token exchange, formatted as a serialized JSON object of Options. "requestedTokenType": "A String", # Required. An identifier for the type of requested security token. Must be `urn:ietf:params:oauth:token-type:access_token`. "scope": "A String", # The OAuth 2.0 scopes to include on the resulting access token, formatted as a list of space-delimited, case-sensitive strings. Required when exchanging an external credential for a Google access token. - "subjectToken": "A String", # Required. The input token. This token is either an external credential issued by a workload identity pool provider, or a short-lived access token issued by Google. If the token is an OIDC JWT, it must use the JWT format defined in [RFC 7523](https://tools.ietf.org/html/rfc7523), and the `subject_token_type` must be `urn:ietf:params:oauth:token-type:jwt`. The following headers are required: - `kid`: The identifier of the signing key securing the JWT. - `alg`: The cryptographic algorithm securing the JWT. Must be `RS256` or `ES256`. The following payload fields are required. For more information, see [RFC 7523, Section 3](https://tools.ietf.org/html/rfc7523#section-3): - `iss`: The issuer of the token. The issuer must provide a discovery document at the URL `/.well-known/openid-configuration`, where `` is the value of this field. The document must be formatted according to section 4.2 of the [OIDC 1.0 Discovery specification](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse). - `iat`: The issue time, in seconds, since the Unix epoch. Must be in the past. - `exp`: The expiration time, in seconds, since the Unix epoch. Must be less than 48 hours after `iat`. Shorter expiration times are more secure. If possible, we recommend setting an expiration time less than 6 hours. - `sub`: The identity asserted in the JWT. - `aud`: For workload identity pools, this must be a value specified in the allowed audiences for the workload identity pool provider, or one of the audiences allowed by default if no audiences were specified. See https://cloud.google.com/iam/docs/reference/rest/v1/projects.locations.workloadIdentityPools.providers#oidc Example header: ``` { "alg": "RS256", "kid": "us-east-11" } ``` Example payload: ``` { "iss": "https://accounts.google.com", "iat": 1517963104, "exp": 1517966704, "aud": "//iam.googleapis.com/projects/1234567890123/locations/global/workloadIdentityPools/my-pool/providers/my-provider", "sub": "113475438248934895348", "my_claims": { "additional_claim": "value" } } ``` If `subject_token` is for AWS, it must be a serialized `GetCallerIdentity` token. This token contains the same information as a request to the AWS [`GetCallerIdentity()`](https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity) method, as well as the AWS [signature](https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) for the request information. Use Signature Version 4. Format the request as URL-encoded JSON, and set the `subject_token_type` parameter to `urn:ietf:params:aws:token-type:aws4_request`. The following parameters are required: - `url`: The URL of the AWS STS endpoint for `GetCallerIdentity()`, such as `https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15`. Regional endpoints are also supported. - `method`: The HTTP request method: `POST`. 
- `headers`: The HTTP request headers, which must include: - `Authorization`: The request signature. - `x-amz-date`: The time you will send the request, formatted as an [ISO8601 Basic](https://docs.aws.amazon.com/general/latest/gr/sigv4_elements.html#sigv4_elements_date) string. This value is typically set to the current time and is used to help prevent replay attacks. - `host`: The hostname of the `url` field; for example, `sts.amazonaws.com`. - `x-goog-cloud-target-resource`: The full, canonical resource name of the workload identity pool provider, with or without an `https:` prefix. To help ensure data integrity, we recommend including this header in the `SignedHeaders` field of the signed request. For example: //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ If you are using temporary security credentials provided by AWS, you must also include the header `x-amz-security-token`, with the value set to the session token. The following example shows a `GetCallerIdentity` token: ``` { "headers": [ {"key": "x-amz-date", "value": "20200815T015049Z"}, {"key": "Authorization", "value": "AWS4-HMAC-SHA256+Credential=$credential,+SignedHeaders=host;x-amz-date;x-goog-cloud-target-resource,+Signature=$signature"}, {"key": "x-goog-cloud-target-resource", "value": "//iam.googleapis.com/projects//locations//workloadIdentityPools//providers/"}, {"key": "host", "value": "sts.amazonaws.com"} . ], "method": "POST", "url": "https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" } ``` You can also use a Google-issued OAuth 2.0 access token with this field to obtain an access token with new security attributes applied, such as a Credential Access Boundary. In this case, set `subject_token_type` to `urn:ietf:params:oauth:token-type:access_token`. If an access token already contains security attributes, you cannot apply additional security attributes. - "subjectTokenType": "A String", # Required. An identifier that indicates the type of the security token in the `subject_token` parameter. Supported values are `urn:ietf:params:oauth:token-type:jwt`, `urn:ietf:params:aws:token-type:aws4_request`, and `urn:ietf:params:oauth:token-type:access_token`. + "subjectToken": "A String", # Required. The input token. This token is either an external credential issued by a workload identity pool provider, or a short-lived access token issued by Google. If the token is an OIDC JWT, it must use the JWT format defined in [RFC 7523](https://tools.ietf.org/html/rfc7523), and the `subject_token_type` must be either `urn:ietf:params:oauth:token-type:jwt` or `urn:ietf:params:oauth:token-type:id_token`. The following headers are required: - `kid`: The identifier of the signing key securing the JWT. - `alg`: The cryptographic algorithm securing the JWT. Must be `RS256` or `ES256`. The following payload fields are required. For more information, see [RFC 7523, Section 3](https://tools.ietf.org/html/rfc7523#section-3): - `iss`: The issuer of the token. The issuer must provide a discovery document at the URL `/.well-known/openid-configuration`, where `` is the value of this field. The document must be formatted according to section 4.2 of the [OIDC 1.0 Discovery specification](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse). - `iat`: The issue time, in seconds, since the Unix epoch. Must be in the past. - `exp`: The expiration time, in seconds, since the Unix epoch. 
Must be less than 48 hours after `iat`. Shorter expiration times are more secure. If possible, we recommend setting an expiration time less than 6 hours. - `sub`: The identity asserted in the JWT. - `aud`: For workload identity pools, this must be a value specified in the allowed audiences for the workload identity pool provider, or one of the audiences allowed by default if no audiences were specified. See https://cloud.google.com/iam/docs/reference/rest/v1/projects.locations.workloadIdentityPools.providers#oidc Example header: ``` { "alg": "RS256", "kid": "us-east-11" } ``` Example payload: ``` { "iss": "https://accounts.google.com", "iat": 1517963104, "exp": 1517966704, "aud": "//iam.googleapis.com/projects/1234567890123/locations/global/workloadIdentityPools/my-pool/providers/my-provider", "sub": "113475438248934895348", "my_claims": { "additional_claim": "value" } } ``` If `subject_token` is for AWS, it must be a serialized `GetCallerIdentity` token. This token contains the same information as a request to the AWS [`GetCallerIdentity()`](https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity) method, as well as the AWS [signature](https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) for the request information. Use Signature Version 4. Format the request as URL-encoded JSON, and set the `subject_token_type` parameter to `urn:ietf:params:aws:token-type:aws4_request`. The following parameters are required: - `url`: The URL of the AWS STS endpoint for `GetCallerIdentity()`, such as `https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15`. Regional endpoints are also supported. - `method`: The HTTP request method: `POST`. - `headers`: The HTTP request headers, which must include: - `Authorization`: The request signature. - `x-amz-date`: The time you will send the request, formatted as an [ISO8601 Basic](https://docs.aws.amazon.com/general/latest/gr/sigv4_elements.html#sigv4_elements_date) string. This value is typically set to the current time and is used to help prevent replay attacks. - `host`: The hostname of the `url` field; for example, `sts.amazonaws.com`. - `x-goog-cloud-target-resource`: The full, canonical resource name of the workload identity pool provider, with or without an `https:` prefix. To help ensure data integrity, we recommend including this header in the `SignedHeaders` field of the signed request. For example: //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ If you are using temporary security credentials provided by AWS, you must also include the header `x-amz-security-token`, with the value set to the session token. The following example shows a `GetCallerIdentity` token: ``` { "headers": [ {"key": "x-amz-date", "value": "20200815T015049Z"}, {"key": "Authorization", "value": "AWS4-HMAC-SHA256+Credential=$credential,+SignedHeaders=host;x-amz-date;x-goog-cloud-target-resource,+Signature=$signature"}, {"key": "x-goog-cloud-target-resource", "value": "//iam.googleapis.com/projects//locations//workloadIdentityPools//providers/"}, {"key": "host", "value": "sts.amazonaws.com"} ], "method": "POST", "url": "https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" } ``` You can also use a Google-issued OAuth 2.0 access token with this field to obtain an access token with new security attributes applied, such as a Credential Access Boundary.
In this case, set `subject_token_type` to `urn:ietf:params:oauth:token-type:access_token`. If an access token already contains security attributes, you cannot apply additional security attributes. + "subjectTokenType": "A String", # Required. An identifier that indicates the type of the security token in the `subject_token` parameter. Supported values are `urn:ietf:params:oauth:token-type:jwt`, `urn:ietf:params:oauth:token-type:id_token`, `urn:ietf:params:aws:token-type:aws4_request`, and `urn:ietf:params:oauth:token-type:access_token`. } x__xgafv: string, V1 error format.
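A sketch of the token exchange described above, trading an external OIDC token for a Google access token. The audience and JWT values are placeholders, and the exchange itself is authenticated by the subject token rather than by the client library's own credentials:

```
from googleapiclient.discovery import build

external_jwt = "eyJhbGciOi..."  # placeholder: OIDC JWT from the external identity provider

sts = build("sts", "v1")
resp = sts.v1().token(body={
    "grantType": "urn:ietf:params:oauth:grant-type:token-exchange",
    "audience": "//iam.googleapis.com/projects/0/locations/global/workloadIdentityPools/my-pool/providers/my-provider",  # placeholder
    "requestedTokenType": "urn:ietf:params:oauth:token-type:access_token",
    "scope": "https://www.googleapis.com/auth/cloud-platform",
    "subjectToken": external_jwt,
    "subjectTokenType": "urn:ietf:params:oauth:token-type:id_token",
}).execute()
access_token = resp["access_token"]
```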
diff --git a/docs/dyn/sts_v1beta.v1beta.html b/docs/dyn/sts_v1beta.v1beta.html index 8f0f78320b8..44ba55f7218 100644 --- a/docs/dyn/sts_v1beta.v1beta.html +++ b/docs/dyn/sts_v1beta.v1beta.html @@ -100,8 +100,8 @@

Method Details

"options": "A String", # A set of features that Security Token Service supports, in addition to the standard OAuth 2.0 token exchange, formatted as a serialized JSON object of Options. "requestedTokenType": "A String", # Required. The type of security token. Must be `urn:ietf:params:oauth:token-type:access_token`, which indicates an OAuth 2.0 access token. "scope": "A String", # The OAuth 2.0 scopes to include on the resulting access token, formatted as a list of space-delimited, case-sensitive strings. Required when exchanging an external credential for a Google access token. - "subjectToken": "A String", # Required. The input token. This token is either an external credential issued by a workload identity pool provider, or a short-lived access token issued by Google. If the token is an OIDC JWT, it must use the JWT format defined in [RFC 7523](https://tools.ietf.org/html/rfc7523), and the `subject_token_type` must be `urn:ietf:params:oauth:token-type:jwt`. The following headers are required: - `kid`: The identifier of the signing key securing the JWT. - `alg`: The cryptographic algorithm securing the JWT. Must be `RS256` or `ES256`. The following payload fields are required. For more information, see [RFC 7523, Section 3](https://tools.ietf.org/html/rfc7523#section-3): - `iss`: The issuer of the token. The issuer must provide a discovery document at the URL `/.well-known/openid-configuration`, where `` is the value of this field. The document must be formatted according to section 4.2 of the [OIDC 1.0 Discovery specification](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse). - `iat`: The issue time, in seconds, since the Unix epoch. Must be in the past. - `exp`: The expiration time, in seconds, since the Unix epoch. Must be less than 48 hours after `iat`. Shorter expiration times are more secure. If possible, we recommend setting an expiration time less than 6 hours. - `sub`: The identity asserted in the JWT. - `aud`: For workload identity pools, this must be a value specified in the allowed audiences for the workload identity pool provider, or one of the audiences allowed by default if no audiences were specified. See https://cloud.google.com/iam/docs/reference/rest/v1/projects.locations.workloadIdentityPools.providers#oidc Example header: ``` { "alg": "RS256", "kid": "us-east-11" } ``` Example payload: ``` { "iss": "https://accounts.google.com", "iat": 1517963104, "exp": 1517966704, "aud": "//iam.googleapis.com/projects/1234567890123/locations/global/workloadIdentityPools/my-pool/providers/my-provider", "sub": "113475438248934895348", "my_claims": { "additional_claim": "value" } } ``` If `subject_token` is for AWS, it must be a serialized `GetCallerIdentity` token. This token contains the same information as a request to the AWS [`GetCallerIdentity()`](https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity) method, as well as the AWS [signature](https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) for the request information. Use Signature Version 4. Format the request as URL-encoded JSON, and set the `subject_token_type` parameter to `urn:ietf:params:aws:token-type:aws4_request`. The following parameters are required: - `url`: The URL of the AWS STS endpoint for `GetCallerIdentity()`, such as `https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15`. Regional endpoints are also supported. - `method`: The HTTP request method: `POST`. 
- `headers`: The HTTP request headers, which must include: - `Authorization`: The request signature. - `x-amz-date`: The time you will send the request, formatted as an [ISO8601 Basic](https://docs.aws.amazon.com/general/latest/gr/sigv4_elements.html#sigv4_elements_date) string. This value is typically set to the current time and is used to help prevent replay attacks. - `host`: The hostname of the `url` field; for example, `sts.amazonaws.com`. - `x-goog-cloud-target-resource`: The full, canonical resource name of the workload identity pool provider, with or without an `https:` prefix. To help ensure data integrity, we recommend including this header in the `SignedHeaders` field of the signed request. For example: //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ If you are using temporary security credentials provided by AWS, you must also include the header `x-amz-security-token`, with the value set to the session token. The following example shows a `GetCallerIdentity` token: ``` { "headers": [ {"key": "x-amz-date", "value": "20200815T015049Z"}, {"key": "Authorization", "value": "AWS4-HMAC-SHA256+Credential=$credential,+SignedHeaders=host;x-amz-date;x-goog-cloud-target-resource,+Signature=$signature"}, {"key": "x-goog-cloud-target-resource", "value": "//iam.googleapis.com/projects//locations//workloadIdentityPools//providers/"}, {"key": "host", "value": "sts.amazonaws.com"} . ], "method": "POST", "url": "https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" } ``` You can also use a Google-issued OAuth 2.0 access token with this field to obtain an access token with new security attributes applied, such as a Credential Access Boundary. In this case, set `subject_token_type` to `urn:ietf:params:oauth:token-type:access_token`. If an access token already contains security attributes, you cannot apply additional security attributes. - "subjectTokenType": "A String", # Required. An identifier that indicates the type of the security token in the `subject_token` parameter. Supported values are `urn:ietf:params:oauth:token-type:jwt`, `urn:ietf:params:aws:token-type:aws4_request`, and `urn:ietf:params:oauth:token-type:access_token`. + "subjectToken": "A String", # Required. The input token. This token is either an external credential issued by a workload identity pool provider, or a short-lived access token issued by Google. If the token is an OIDC JWT, it must use the JWT format defined in [RFC 7523](https://tools.ietf.org/html/rfc7523), and the `subject_token_type` must be either `urn:ietf:params:oauth:token-type:jwt` or `urn:ietf:params:oauth:token-type:id_token`. The following headers are required: - `kid`: The identifier of the signing key securing the JWT. - `alg`: The cryptographic algorithm securing the JWT. Must be `RS256` or `ES256`. The following payload fields are required. For more information, see [RFC 7523, Section 3](https://tools.ietf.org/html/rfc7523#section-3): - `iss`: The issuer of the token. The issuer must provide a discovery document at the URL `/.well-known/openid-configuration`, where `` is the value of this field. The document must be formatted according to section 4.2 of the [OIDC 1.0 Discovery specification](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse). - `iat`: The issue time, in seconds, since the Unix epoch. Must be in the past. - `exp`: The expiration time, in seconds, since the Unix epoch. 
Must be less than 48 hours after `iat`. Shorter expiration times are more secure. If possible, we recommend setting an expiration time less than 6 hours. - `sub`: The identity asserted in the JWT. - `aud`: For workload identity pools, this must be a value specified in the allowed audiences for the workload identity pool provider, or one of the audiences allowed by default if no audiences were specified. See https://cloud.google.com/iam/docs/reference/rest/v1/projects.locations.workloadIdentityPools.providers#oidc Example header: ``` { "alg": "RS256", "kid": "us-east-11" } ``` Example payload: ``` { "iss": "https://accounts.google.com", "iat": 1517963104, "exp": 1517966704, "aud": "//iam.googleapis.com/projects/1234567890123/locations/global/workloadIdentityPools/my-pool/providers/my-provider", "sub": "113475438248934895348", "my_claims": { "additional_claim": "value" } } ``` If `subject_token` is for AWS, it must be a serialized `GetCallerIdentity` token. This token contains the same information as a request to the AWS [`GetCallerIdentity()`](https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity) method, as well as the AWS [signature](https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) for the request information. Use Signature Version 4. Format the request as URL-encoded JSON, and set the `subject_token_type` parameter to `urn:ietf:params:aws:token-type:aws4_request`. The following parameters are required: - `url`: The URL of the AWS STS endpoint for `GetCallerIdentity()`, such as `https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15`. Regional endpoints are also supported. - `method`: The HTTP request method: `POST`. - `headers`: The HTTP request headers, which must include: - `Authorization`: The request signature. - `x-amz-date`: The time you will send the request, formatted as an [ISO8601 Basic](https://docs.aws.amazon.com/general/latest/gr/sigv4_elements.html#sigv4_elements_date) string. This value is typically set to the current time and is used to help prevent replay attacks. - `host`: The hostname of the `url` field; for example, `sts.amazonaws.com`. - `x-goog-cloud-target-resource`: The full, canonical resource name of the workload identity pool provider, with or without an `https:` prefix. To help ensure data integrity, we recommend including this header in the `SignedHeaders` field of the signed request. For example: //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ If you are using temporary security credentials provided by AWS, you must also include the header `x-amz-security-token`, with the value set to the session token. The following example shows a `GetCallerIdentity` token: ``` { "headers": [ {"key": "x-amz-date", "value": "20200815T015049Z"}, {"key": "Authorization", "value": "AWS4-HMAC-SHA256+Credential=$credential,+SignedHeaders=host;x-amz-date;x-goog-cloud-target-resource,+Signature=$signature"}, {"key": "x-goog-cloud-target-resource", "value": "//iam.googleapis.com/projects//locations//workloadIdentityPools//providers/"}, {"key": "host", "value": "sts.amazonaws.com"} ], "method": "POST", "url": "https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" } ``` You can also use a Google-issued OAuth 2.0 access token with this field to obtain an access token with new security attributes applied, such as a Credential Access Boundary.
In this case, set `subject_token_type` to `urn:ietf:params:oauth:token-type:access_token`. If an access token already contains security attributes, you cannot apply additional security attributes. + "subjectTokenType": "A String", # Required. An identifier that indicates the type of the security token in the `subject_token` parameter. Supported values are `urn:ietf:params:oauth:token-type:jwt`, `urn:ietf:params:oauth:token-type:id_token`, `urn:ietf:params:aws:token-type:aws4_request`, and `urn:ietf:params:oauth:token-type:access_token`. } x__xgafv: string, V1 error format. diff --git a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json index 0996b6d3367..1dc5afa57d8 100644 --- a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json +++ b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json @@ -115,7 +115,7 @@ } } }, - "revision": "20210626", + "revision": "20210703", "rootUrl": "https://acceleratedmobilepageurl.googleapis.com/", "schemas": { "AmpUrl": { diff --git a/googleapiclient/discovery_cache/documents/accessapproval.v1.json b/googleapiclient/discovery_cache/documents/accessapproval.v1.json index 56e04cef3b3..f9efe373db6 100644 --- a/googleapiclient/discovery_cache/documents/accessapproval.v1.json +++ b/googleapiclient/discovery_cache/documents/accessapproval.v1.json @@ -754,7 +754,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://accessapproval.googleapis.com/", "schemas": { "AccessApprovalSettings": { @@ -935,7 +935,7 @@ "id": "EnrolledService", "properties": { "cloudProduct": { - "description": "The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. * all * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services", + "description": "The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * App Engine * BigQuery * Cloud Bigtable * Cloud Key Management Service * Compute Engine * Cloud Dataflow * Cloud DLP * Cloud EKM * Cloud HSM * Cloud Identity and Access Management * Cloud Logging * Cloud Pub/Sub * Cloud Spanner * Cloud SQL * Cloud Storage * Google Kubernetes Engine * Persistent Disk Note: These values are supported as input for legacy purposes, but will not be returned from the API. 
* all * ga-only * appengine.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services", "type": "string" }, "enrollmentLevel": { diff --git a/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.2.json b/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.2.json index c89daf88126..0b582212e90 100644 --- a/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.2.json +++ b/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.2.json @@ -15,7 +15,7 @@ "description": "Accesses your bidding-account information, submits creatives for validation, finds available direct deals, and retrieves performance reports.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/ad-exchange/buyer-rest", - "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/RFMuIj0KJEvE_FBrpyYlt8BGaMk\"", + "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/pBR1hM2Sf2g00WQqJp7gw6W9X9g\"", "icons": { "x16": "https://www.google.com/images/icons/product/doubleclick-16.gif", "x32": "https://www.google.com/images/icons/product/doubleclick-32.gif" @@ -259,7 +259,7 @@ } } }, - "revision": "20210627", + "revision": "20210703", "rootUrl": "https://www.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.3.json b/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.3.json index 8d39db3a915..62eb5e32635 100644 --- a/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.3.json +++ b/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.3.json @@ -15,7 +15,7 @@ "description": "Accesses your bidding-account information, submits creatives for validation, finds available direct deals, and retrieves performance reports.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/ad-exchange/buyer-rest", - "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/HiSbGUiOLMnPuKzxHzS96zGf6dg\"", + "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/z3yJmd5YhBxhFGHLDmUofK9-u1o\"", "icons": { "x16": "https://www.google.com/images/icons/product/doubleclick-16.gif", "x32": "https://www.google.com/images/icons/product/doubleclick-32.gif" @@ -699,7 +699,7 @@ } } }, - "revision": "20210627", + "revision": "20210703", "rootUrl": "https://www.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.4.json b/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.4.json index 2d6ce5a5a31..8dd40889139 100644 --- a/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.4.json +++ b/googleapiclient/discovery_cache/documents/adexchangebuyer.v1.4.json @@ -15,7 +15,7 @@ "description": "Accesses your bidding-account information, submits creatives for validation, finds available direct deals, and retrieves performance reports.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/ad-exchange/buyer-rest", - "etag": 
"\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/HW0q-tE1sQojuAVbOMcgY52cAtw\"", + "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/uPTe_psXxX6Quo8DgpvaItXDGjA\"", "icons": { "x16": "https://www.google.com/images/icons/product/doubleclick-16.gif", "x32": "https://www.google.com/images/icons/product/doubleclick-32.gif" @@ -1255,7 +1255,7 @@ } } }, - "revision": "20210627", + "revision": "20210703", "rootUrl": "https://www.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json index b7506758959..3b6c676f2a5 100644 --- a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json @@ -2568,7 +2568,7 @@ } } }, - "revision": "20210624", + "revision": "20210701", "rootUrl": "https://adexchangebuyer.googleapis.com/", "schemas": { "AbsoluteDateRange": { diff --git a/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json b/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json index f8622fea30b..99bd31631bf 100644 --- a/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json @@ -272,7 +272,7 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Application": { diff --git a/googleapiclient/discovery_cache/documents/admin.directory_v1.json b/googleapiclient/discovery_cache/documents/admin.directory_v1.json index 52f3e0a5947..ff67cce647f 100644 --- a/googleapiclient/discovery_cache/documents/admin.directory_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.directory_v1.json @@ -4397,7 +4397,7 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Alias": { diff --git a/googleapiclient/discovery_cache/documents/admin.reports_v1.json b/googleapiclient/discovery_cache/documents/admin.reports_v1.json index a92a413bff1..50e0b67f803 100644 --- a/googleapiclient/discovery_cache/documents/admin.reports_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.reports_v1.json @@ -631,7 +631,7 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Activities": { diff --git a/googleapiclient/discovery_cache/documents/admob.v1.json b/googleapiclient/discovery_cache/documents/admob.v1.json index 383a8767ac2..464cbb7afa9 100644 --- a/googleapiclient/discovery_cache/documents/admob.v1.json +++ b/googleapiclient/discovery_cache/documents/admob.v1.json @@ -321,7 +321,7 @@ } } }, - "revision": "20210628", + "revision": "20210701", "rootUrl": "https://admob.googleapis.com/", "schemas": { "AdUnit": { diff --git a/googleapiclient/discovery_cache/documents/admob.v1beta.json b/googleapiclient/discovery_cache/documents/admob.v1beta.json index 081674f8158..fedeb333bf3 100644 --- a/googleapiclient/discovery_cache/documents/admob.v1beta.json +++ b/googleapiclient/discovery_cache/documents/admob.v1beta.json @@ -321,7 +321,7 @@ } } }, - "revision": "20210628", + "revision": "20210701", "rootUrl": "https://admob.googleapis.com/", "schemas": { "AdUnit": { diff --git a/googleapiclient/discovery_cache/documents/adsense.v2.json b/googleapiclient/discovery_cache/documents/adsense.v2.json index 44c10ec7ecc..ab898cc28d9 100644 --- a/googleapiclient/discovery_cache/documents/adsense.v2.json +++ 
b/googleapiclient/discovery_cache/documents/adsense.v2.json @@ -701,6 +701,8 @@ "REQUESTED_AD_TYPE_CODE", "SERVED_AD_TYPE_NAME", "SERVED_AD_TYPE_CODE", + "AD_FORMAT_NAME", + "AD_FORMAT_CODE", "CUSTOM_SEARCH_STYLE_NAME", "CUSTOM_SEARCH_STYLE_ID", "DOMAIN_REGISTRANT", @@ -747,6 +749,8 @@ "Requested ad type code (e.g. \"IMAGE\", \"RADLINK\", \"OTHER\").", "Localized served ad type name (e.g. \"Display\", \"Link unit\", \"Other\").", "Served ad type code (e.g. \"IMAGE\", \"RADLINK\", \"OTHER\").", + "Localized ad format name indicating the way an ad is shown to the users on your site (e.g. \"In-page\", \"Anchor\", \"Vignette\").", + "Ad format code indicating the way an ad is shown to the users on your site (e.g. \"ON_PAGE\", \"ANCHOR\", \"INTERSTITIAL\").", "Custom search style name.", "Custom search style id.", "Domain registrants.", @@ -1005,6 +1009,8 @@ "REQUESTED_AD_TYPE_CODE", "SERVED_AD_TYPE_NAME", "SERVED_AD_TYPE_CODE", + "AD_FORMAT_NAME", + "AD_FORMAT_CODE", "CUSTOM_SEARCH_STYLE_NAME", "CUSTOM_SEARCH_STYLE_ID", "DOMAIN_REGISTRANT", @@ -1051,6 +1057,8 @@ "Requested ad type code (e.g. \"IMAGE\", \"RADLINK\", \"OTHER\").", "Localized served ad type name (e.g. \"Display\", \"Link unit\", \"Other\").", "Served ad type code (e.g. \"IMAGE\", \"RADLINK\", \"OTHER\").", + "Localized ad format name indicating the way an ad is shown to the users on your site (e.g. \"In-page\", \"Anchor\", \"Vignette\").", + "Ad format code indicating the way an ad is shown to the users on your site (e.g. \"ON_PAGE\", \"ANCHOR\", \"INTERSTITIAL\").", "Custom search style name.", "Custom search style id.", "Domain registrants.", @@ -1559,7 +1567,7 @@ } } }, - "revision": "20210626", + "revision": "20210703", "rootUrl": "https://adsense.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json index cf0c945bbb5..0a4a86cb3db 100644 --- a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json @@ -423,7 +423,7 @@ } } }, - "revision": "20210622", + "revision": "20210701", "rootUrl": "https://alertcenter.googleapis.com/", "schemas": { "AccountWarning": { diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json index d1f3f9b2727..c97644a2069 100644 --- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json @@ -2762,7 +2762,7 @@ } } }, - "revision": "20210625", + "revision": "20210705", "rootUrl": "https://analyticsadmin.googleapis.com/", "schemas": { "GoogleAnalyticsAdminV1alphaAccount": { diff --git a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json index 9c52634d8b7..32b826776ec 100644 --- a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json +++ b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json @@ -284,7 +284,7 @@ } } }, - "revision": "20210625", + "revision": "20210629", "rootUrl": "https://analyticsdata.googleapis.com/", "schemas": { "BatchRunPivotReportsRequest": { diff --git a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json index d7b2ded446d..8aa975d08af 100644 --- 
a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json +++ b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json @@ -825,7 +825,7 @@ } } }, - "revision": "20210624", + "revision": "20210703", "rootUrl": "https://androiddeviceprovisioning.googleapis.com/", "schemas": { "ClaimDeviceRequest": { diff --git a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json index 56799eaafc8..9a6a8a0985a 100644 --- a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json +++ b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json @@ -2610,7 +2610,7 @@ } } }, - "revision": "20210623", + "revision": "20210630", "rootUrl": "https://androidenterprise.googleapis.com/", "schemas": { "Administrator": { diff --git a/googleapiclient/discovery_cache/documents/androidmanagement.v1.json b/googleapiclient/discovery_cache/documents/androidmanagement.v1.json index b790bfb9970..785d2bb060d 100644 --- a/googleapiclient/discovery_cache/documents/androidmanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/androidmanagement.v1.json @@ -1004,7 +1004,7 @@ } } }, - "revision": "20210622", + "revision": "20210628", "rootUrl": "https://androidmanagement.googleapis.com/", "schemas": { "AdvancedSecurityOverrides": { diff --git a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json index 75e2278c552..62f5149b739 100644 --- a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json +++ b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json @@ -2676,7 +2676,7 @@ } } }, - "revision": "20210626", + "revision": "20210701", "rootUrl": "https://androidpublisher.googleapis.com/", "schemas": { "Apk": { diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1.json b/googleapiclient/discovery_cache/documents/apigateway.v1.json index 207c2871c48..5170e52b1a5 100644 --- a/googleapiclient/discovery_cache/documents/apigateway.v1.json +++ b/googleapiclient/discovery_cache/documents/apigateway.v1.json @@ -1083,7 +1083,7 @@ } } }, - "revision": "20210616", + "revision": "20210624", "rootUrl": "https://apigateway.googleapis.com/", "schemas": { "ApigatewayApi": { diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json index dd697d3a1f7..a4477faa9c6 100644 --- a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json +++ b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json @@ -1083,7 +1083,7 @@ } } }, - "revision": "20210616", + "revision": "20210624", "rootUrl": "https://apigateway.googleapis.com/", "schemas": { "ApigatewayApi": { diff --git a/googleapiclient/discovery_cache/documents/apikeys.v2.json b/googleapiclient/discovery_cache/documents/apikeys.v2.json index 450a030979b..9839ea4e51d 100644 --- a/googleapiclient/discovery_cache/documents/apikeys.v2.json +++ b/googleapiclient/discovery_cache/documents/apikeys.v2.json @@ -424,7 +424,7 @@ } } }, - "revision": "20210625", + "revision": "20210628", "rootUrl": "https://apikeys.googleapis.com/", "schemas": { "Operation": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1.json b/googleapiclient/discovery_cache/documents/appengine.v1.json index 1478fd45e96..6e774a45f9c 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1.json +++ 
b/googleapiclient/discovery_cache/documents/appengine.v1.json @@ -1595,7 +1595,7 @@ } } }, - "revision": "20210618", + "revision": "20210629", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json index 0426b7499cb..2cbc5faaad0 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json @@ -709,7 +709,7 @@ } } }, - "revision": "20210618", + "revision": "20210629", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "AuthorizedCertificate": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1beta.json b/googleapiclient/discovery_cache/documents/appengine.v1beta.json index c3fa6208b59..fd27d2a8cf7 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1beta.json @@ -1595,7 +1595,7 @@ } } }, - "revision": "20210618", + "revision": "20210629", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { diff --git a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json index f93a41f4746..797488320f7 100644 --- a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json @@ -12,10 +12,10 @@ "description": "See and download all your Google Drive files" }, "https://www.googleapis.com/auth/spreadsheets": { - "description": "See, edit, create, and delete your spreadsheets in Google Drive" + "description": "See, edit, create, and delete all your Google Sheets spreadsheets" }, "https://www.googleapis.com/auth/spreadsheets.readonly": { - "description": "View your Google Spreadsheets" + "description": "See all your Google Sheets spreadsheets" }, "https://www.googleapis.com/auth/tables": { "description": "See, edit, create, and delete your tables in Tables by Area 120" @@ -586,7 +586,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://area120tables.googleapis.com/", "schemas": { "BatchCreateRowsRequest": { diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json index 897454cf613..d33ae7fd009 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json @@ -348,9 +348,57 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { + "AptArtifact": { + "description": "A detailed representation of an Apt artifact. Information in the record is derived from the archive's control file. See https://www.debian.org/doc/debian-policy/ch-controlfields.html", + "id": "AptArtifact", + "properties": { + "architecture": { + "description": "Output only. Operating system architecture of the artifact.", + "readOnly": true, + "type": "string" + }, + "component": { + "description": "Output only. Repository component of the artifact.", + "readOnly": true, + "type": "string" + }, + "controlFile": { + "description": "Output only. Contents of the artifact's control metadata file.", + "format": "byte", + "readOnly": true, + "type": "string" + }, + "name": { + "description": "Output only. 
The Artifact Registry resource name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageName": { + "description": "Output only. The Apt package name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageType": { + "description": "Output only. An artifact is a binary or source package.", + "enum": [ + "PACKAGE_TYPE_UNSPECIFIED", + "BINARY", + "SOURCE" + ], + "enumDescriptions": [ + "Package type is not specified.", + "Binary package.", + "Source package." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "CancelOperationRequest": { "description": "The request message for Operations.CancelOperation.", "id": "CancelOperationRequest", @@ -404,6 +452,114 @@ "properties": {}, "type": "object" }, + "ImportAptArtifactsErrorInfo": { + "description": "Error information explaining why a package was not imported.", + "id": "ImportAptArtifactsErrorInfo", + "properties": { + "error": { + "$ref": "Status", + "description": "The detailed error status." + }, + "gcsSource": { + "$ref": "ImportAptArtifactsGcsSource", + "description": "Google Cloud Storage location requested." + } + }, + "type": "object" + }, + "ImportAptArtifactsGcsSource": { + "description": "Google Cloud Storage location where the artifacts currently reside.", + "id": "ImportAptArtifactsGcsSource", + "properties": { + "uris": { + "description": "Cloud Storage paths URI (e.g., gs://my_bucket//my_object).", + "items": { + "type": "string" + }, + "type": "array" + }, + "useWildcards": { + "description": "Supports URI wildcards for matching multiple objects from a single URI.", + "type": "boolean" + } + }, + "type": "object" + }, + "ImportAptArtifactsResponse": { + "description": "The response message from importing artifacts.", + "id": "ImportAptArtifactsResponse", + "properties": { + "aptArtifacts": { + "description": "The Apt artifacts updated.", + "items": { + "$ref": "AptArtifact" + }, + "type": "array" + }, + "errors": { + "description": "Detailed error info for packages that were not imported.", + "items": { + "$ref": "ImportAptArtifactsErrorInfo" + }, + "type": "array" + } + }, + "type": "object" + }, + "ImportYumArtifactsErrorInfo": { + "description": "Error information explaining why a package was not imported.", + "id": "ImportYumArtifactsErrorInfo", + "properties": { + "error": { + "$ref": "Status", + "description": "The detailed error status." + }, + "gcsSource": { + "$ref": "ImportYumArtifactsGcsSource", + "description": "Google Cloud Storage location requested." 
+ } + }, + "type": "object" + }, + "ImportYumArtifactsGcsSource": { + "description": "Google Cloud Storage location where the artifacts currently reside.", + "id": "ImportYumArtifactsGcsSource", + "properties": { + "uris": { + "description": "Cloud Storage paths URI (e.g., gs://my_bucket//my_object).", + "items": { + "type": "string" + }, + "type": "array" + }, + "useWildcards": { + "description": "Supports URI wildcards for matching multiple objects from a single URI.", + "type": "boolean" + } + }, + "type": "object" + }, + "ImportYumArtifactsResponse": { + "description": "The response message from importing artifacts.", + "id": "ImportYumArtifactsResponse", + "properties": { + "errors": { + "description": "Detailed error info for packages that were not imported.", + "items": { + "$ref": "ImportYumArtifactsErrorInfo" + }, + "type": "array" + }, + "yumArtifacts": { + "description": "The yum artifacts updated.", + "items": { + "$ref": "YumArtifact" + }, + "type": "array" + } + }, + "type": "object" + }, "ListDockerImagesResponse": { "description": "The response from listing docker images.", "id": "ListDockerImagesResponse", @@ -575,6 +731,93 @@ } }, "type": "object" + }, + "UploadAptArtifactMediaResponse": { + "description": "The response to upload an artifact.", + "id": "UploadAptArtifactMediaResponse", + "properties": { + "operation": { + "$ref": "Operation", + "description": "Operation to be returned to the user." + } + }, + "type": "object" + }, + "UploadAptArtifactResponse": { + "description": "The response of the completed artifact upload operation. This response is contained in the Operation and available to users.", + "id": "UploadAptArtifactResponse", + "properties": { + "aptArtifacts": { + "description": "The Apt artifacts updated.", + "items": { + "$ref": "AptArtifact" + }, + "type": "array" + } + }, + "type": "object" + }, + "UploadYumArtifactMediaResponse": { + "description": "The response to upload an artifact.", + "id": "UploadYumArtifactMediaResponse", + "properties": { + "operation": { + "$ref": "Operation", + "description": "Operation to be returned to the user." + } + }, + "type": "object" + }, + "UploadYumArtifactResponse": { + "description": "The response of the completed artifact upload operation. This response is contained in the Operation and available to users.", + "id": "UploadYumArtifactResponse", + "properties": { + "yumArtifacts": { + "description": "The yum artifacts updated.", + "items": { + "$ref": "YumArtifact" + }, + "type": "array" + } + }, + "type": "object" + }, + "YumArtifact": { + "description": "A detailed representation of a Yum artifact.", + "id": "YumArtifact", + "properties": { + "architecture": { + "description": "Output only. Operating system architecture of the artifact.", + "readOnly": true, + "type": "string" + }, + "name": { + "description": "Output only. The Artifact Registry resource name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageName": { + "description": "Output only. The yum package name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageType": { + "description": "Output only. An artifact is a binary or source package.", + "enum": [ + "PACKAGE_TYPE_UNSPECIFIED", + "BINARY", + "SOURCE" + ], + "enumDescriptions": [ + "Package type is not specified.", + "Binary package (.rpm).", + "Source package (.srpm)."
+ ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json index f4bd089a5c3..551d57cbd04 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json @@ -971,9 +971,57 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { + "AptArtifact": { + "description": "A detailed representation of an Apt artifact. Information in the record is derived from the archive's control file. See https://www.debian.org/doc/debian-policy/ch-controlfields.html", + "id": "AptArtifact", + "properties": { + "architecture": { + "description": "Output only. Operating system architecture of the artifact.", + "readOnly": true, + "type": "string" + }, + "component": { + "description": "Output only. Repository component of the artifact.", + "readOnly": true, + "type": "string" + }, + "controlFile": { + "description": "Output only. Contents of the artifact's control metadata file.", + "format": "byte", + "readOnly": true, + "type": "string" + }, + "name": { + "description": "Output only. The Artifact Registry resource name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageName": { + "description": "Output only. The Apt package name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageType": { + "description": "Output only. An artifact is a binary or source package.", + "enum": [ + "PACKAGE_TYPE_UNSPECIFIED", + "BINARY", + "SOURCE" + ], + "enumDescriptions": [ + "Package type is not specified.", + "Binary package.", + "Source package." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "Binding": { "description": "Associates `members` with a `role`.", "id": "Binding", @@ -1086,6 +1134,114 @@ }, "type": "object" }, + "ImportAptArtifactsErrorInfo": { + "description": "Error information explaining why a package was not imported.", + "id": "ImportAptArtifactsErrorInfo", + "properties": { + "error": { + "$ref": "Status", + "description": "The detailed error status." + }, + "gcsSource": { + "$ref": "ImportAptArtifactsGcsSource", + "description": "Google Cloud Storage location requested." 
+ } + }, + "type": "object" + }, + "ImportAptArtifactsGcsSource": { + "description": "Google Cloud Storage location where the artifacts currently reside.", + "id": "ImportAptArtifactsGcsSource", + "properties": { + "uris": { + "description": "Cloud Storage paths URI (e.g., gs://my_bucket//my_object).", + "items": { + "type": "string" + }, + "type": "array" + }, + "useWildcards": { + "description": "Supports URI wildcards for matching multiple objects from a single URI.", + "type": "boolean" + } + }, + "type": "object" + }, + "ImportAptArtifactsResponse": { + "description": "The response message from importing artifacts.", + "id": "ImportAptArtifactsResponse", + "properties": { + "aptArtifacts": { + "description": "The Apt artifacts updated.", + "items": { + "$ref": "AptArtifact" + }, + "type": "array" + }, + "errors": { + "description": "Detailed error info for packages that were not imported.", + "items": { + "$ref": "ImportAptArtifactsErrorInfo" + }, + "type": "array" + } + }, + "type": "object" + }, + "ImportYumArtifactsErrorInfo": { + "description": "Error information explaining why a package was not imported.", + "id": "ImportYumArtifactsErrorInfo", + "properties": { + "error": { + "$ref": "Status", + "description": "The detailed error status." + }, + "gcsSource": { + "$ref": "ImportYumArtifactsGcsSource", + "description": "Google Cloud Storage location requested." + } + }, + "type": "object" + }, + "ImportYumArtifactsGcsSource": { + "description": "Google Cloud Storage location where the artifacts currently reside.", + "id": "ImportYumArtifactsGcsSource", + "properties": { + "uris": { + "description": "Cloud Storage paths URI (e.g., gs://my_bucket//my_object).", + "items": { + "type": "string" + }, + "type": "array" + }, + "useWildcards": { + "description": "Supports URI wildcards for matching multiple objects from a single URI.", + "type": "boolean" + } + }, + "type": "object" + }, + "ImportYumArtifactsResponse": { + "description": "The response message from importing artifacts.", + "id": "ImportYumArtifactsResponse", + "properties": { + "errors": { + "description": "Detailed error info for packages that were not imported.", + "items": { + "$ref": "ImportYumArtifactsErrorInfo" + }, + "type": "array" + }, + "yumArtifacts": { + "description": "The yum artifacts updated.", + "items": { + "$ref": "YumArtifact" + }, + "type": "array" + } + }, + "type": "object" + }, "ListFilesResponse": { "description": "The response from listing files.", "id": "ListFilesResponse", @@ -1467,6 +1623,56 @@ }, "type": "object" }, + "UploadAptArtifactMediaResponse": { + "description": "The response to upload an artifact.", + "id": "UploadAptArtifactMediaResponse", + "properties": { + "operation": { + "$ref": "Operation", + "description": "Operation to be returned to the user." + } + }, + "type": "object" + }, + "UploadAptArtifactResponse": { + "description": "The response of the completed artifact upload operation. This response is contained in the Operation and available to users.", + "id": "UploadAptArtifactResponse", + "properties": { + "aptArtifacts": { + "description": "The Apt artifacts updated.", + "items": { + "$ref": "AptArtifact" + }, + "type": "array" + } + }, + "type": "object" + }, + "UploadYumArtifactMediaResponse": { + "description": "The response to upload an artifact.", + "id": "UploadYumArtifactMediaResponse", + "properties": { + "operation": { + "$ref": "Operation", + "description": "Operation to be returned to the user." 
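A note on the upload shape these new v1beta1 schemas (and their v1 and v1beta2 twins) share: the media upload returns an `Operation` via `Upload*MediaResponse`, and the terminal `Upload*Response` or `Import*Response` is embedded in `Operation.response` once `done` is true. Below is a minimal polling sketch with this client library; it assumes the standard `projects.locations.operations.get` method is exposed on the surface you build, and application-default credentials with the cloud-platform scope.
```
# Sketch: poll an Artifact Registry long-running operation until done.
import time

from googleapiclient.discovery import build

client = build("artifactregistry", "v1beta1")

def wait_for_operation(name, poll_seconds=5):
    """Return operation["response"] (the Upload*/Import* response documented
    above) once the operation reports done; raise on a terminal error."""
    while True:
        op = client.projects().locations().operations().get(name=name).execute()
        if op.get("done"):
            if "error" in op:
                raise RuntimeError(op["error"])
            return op.get("response", {})
        time.sleep(poll_seconds)
```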
+ } + }, + "type": "object" + }, + "UploadYumArtifactResponse": { + "description": "The response of the completed artifact upload operation. This response is contained in the Operation and available to users.", + "id": "UploadYumArtifactResponse", + "properties": { + "yumArtifacts": { + "description": "The yum artifacts updated.", + "items": { + "$ref": "YumArtifact" + }, + "type": "array" + } + }, + "type": "object" + }, "Version": { "description": "The body of a version resource. A version resource represents a collection of components, such as files and other data. This may correspond to a version in many package management schemes.", "id": "Version", @@ -1498,6 +1704,43 @@ } }, "type": "object" + }, + "YumArtifact": { + "description": "A detailed representation of a Yum artifact.", + "id": "YumArtifact", + "properties": { + "architecture": { + "description": "Output only. Operating system architecture of the artifact.", + "readOnly": true, + "type": "string" + }, + "name": { + "description": "Output only. The Artifact Registry resource name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageName": { + "description": "Output only. The yum package name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageType": { + "description": "Output only. An artifact is a binary or source package.", + "enum": [ + "PACKAGE_TYPE_UNSPECIFIED", + "BINARY", + "SOURCE" + ], + "enumDescriptions": [ + "Package type is not specified.", + "Binary package (.rpm).", + "Source package (.srpm)." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json index b86472c9712..b1e655fb167 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json @@ -502,6 +502,38 @@ } }, "resources": { + "aptArtifacts": { + "methods": { + "import": { + "description": "Imports Apt artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored.", + "flatPath": "v1beta2/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/aptArtifacts:import", + "httpMethod": "POST", + "id": "artifactregistry.projects.locations.repositories.aptArtifacts.import", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The name of the parent resource where the artifacts will be imported.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/repositories/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta2/{+parent}/aptArtifacts:import", + "request": { + "$ref": "ImportAptArtifactsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, "files": { "methods": { "get": { @@ -963,6 +995,38 @@ } } } + }, + "yumArtifacts": { + "methods": { + "import": { + "description": "Imports Yum (RPM) artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts.
Imported artifacts that conflict with existing resources are ignored.", + "flatPath": "v1beta2/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/yumArtifacts:import", + "httpMethod": "POST", + "id": "artifactregistry.projects.locations.repositories.yumArtifacts.import", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The name of the parent resource where the artifacts will be imported.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/repositories/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta2/{+parent}/yumArtifacts:import", + "request": { + "$ref": "ImportYumArtifactsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } } } } @@ -971,9 +1035,57 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { + "AptArtifact": { + "description": "A detailed representation of an Apt artifact. Information in the record is derived from the archive's control file. See https://www.debian.org/doc/debian-policy/ch-controlfields.html", + "id": "AptArtifact", + "properties": { + "architecture": { + "description": "Output only. Operating system architecture of the artifact.", + "readOnly": true, + "type": "string" + }, + "component": { + "description": "Output only. Repository component of the artifact.", + "readOnly": true, + "type": "string" + }, + "controlFile": { + "description": "Output only. Contents of the artifact's control metadata file.", + "format": "byte", + "readOnly": true, + "type": "string" + }, + "name": { + "description": "Output only. The Artifact Registry resource name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageName": { + "description": "Output only. The Apt package name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageType": { + "description": "Output only. An artifact is a binary or source package.", + "enum": [ + "PACKAGE_TYPE_UNSPECIFIED", + "BINARY", + "SOURCE" + ], + "enumDescriptions": [ + "Package type is not specified.", + "Binary package.", + "Source package." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "Binding": { "description": "Associates `members` with a `role`.", "id": "Binding", @@ -1086,6 +1198,136 @@ }, "type": "object" }, + "ImportAptArtifactsErrorInfo": { + "description": "Error information explaining why a package was not imported.", + "id": "ImportAptArtifactsErrorInfo", + "properties": { + "error": { + "$ref": "Status", + "description": "The detailed error status." + }, + "gcsSource": { + "$ref": "ImportAptArtifactsGcsSource", + "description": "Google Cloud Storage location requested." 
+ } + }, + "type": "object" + }, + "ImportAptArtifactsGcsSource": { + "description": "Google Cloud Storage location where the artifacts currently reside.", + "id": "ImportAptArtifactsGcsSource", + "properties": { + "uris": { + "description": "Cloud Storage paths URI (e.g., gs://my_bucket//my_object).", + "items": { + "type": "string" + }, + "type": "array" + }, + "useWildcards": { + "description": "Supports URI wildcards for matching multiple objects from a single URI.", + "type": "boolean" + } + }, + "type": "object" + }, + "ImportAptArtifactsRequest": { + "description": "The request to import new apt artifacts.", + "id": "ImportAptArtifactsRequest", + "properties": { + "gcsSource": { + "$ref": "ImportAptArtifactsGcsSource", + "description": "Google Cloud Storage location where input content is located." + } + }, + "type": "object" + }, + "ImportAptArtifactsResponse": { + "description": "The response message from importing artifacts.", + "id": "ImportAptArtifactsResponse", + "properties": { + "aptArtifacts": { + "description": "The Apt artifacts updated.", + "items": { + "$ref": "AptArtifact" + }, + "type": "array" + }, + "errors": { + "description": "Detailed error info for packages that were not imported.", + "items": { + "$ref": "ImportAptArtifactsErrorInfo" + }, + "type": "array" + } + }, + "type": "object" + }, + "ImportYumArtifactsErrorInfo": { + "description": "Error information explaining why a package was not imported.", + "id": "ImportYumArtifactsErrorInfo", + "properties": { + "error": { + "$ref": "Status", + "description": "The detailed error status." + }, + "gcsSource": { + "$ref": "ImportYumArtifactsGcsSource", + "description": "Google Cloud Storage location requested." + } + }, + "type": "object" + }, + "ImportYumArtifactsGcsSource": { + "description": "Google Cloud Storage location where the artifacts currently reside.", + "id": "ImportYumArtifactsGcsSource", + "properties": { + "uris": { + "description": "Cloud Storage paths URI (e.g., gs://my_bucket//my_object).", + "items": { + "type": "string" + }, + "type": "array" + }, + "useWildcards": { + "description": "Supports URI wildcards for matching multiple objects from a single URI.", + "type": "boolean" + } + }, + "type": "object" + }, + "ImportYumArtifactsRequest": { + "description": "The request to import new yum artifacts.", + "id": "ImportYumArtifactsRequest", + "properties": { + "gcsSource": { + "$ref": "ImportYumArtifactsGcsSource", + "description": "Google Cloud Storage location where input content is located." + } + }, + "type": "object" + }, + "ImportYumArtifactsResponse": { + "description": "The response message from importing artifacts.", + "id": "ImportYumArtifactsResponse", + "properties": { + "errors": { + "description": "Detailed error info for packages that were not imported.", + "items": { + "$ref": "ImportYumArtifactsErrorInfo" + }, + "type": "array" + }, + "yumArtifacts": { + "description": "The yum artifacts updated.", + "items": { + "$ref": "YumArtifact" + }, + "type": "array" + } + }, + "type": "object" + }, "ListFilesResponse": { "description": "The response from listing files.", "id": "ListFilesResponse", @@ -1467,6 +1709,56 @@ }, "type": "object" }, + "UploadAptArtifactMediaResponse": { + "description": "The response to upload an artifact.", + "id": "UploadAptArtifactMediaResponse", + "properties": { + "operation": { + "$ref": "Operation", + "description": "Operation to be returned to the user." 
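The v1beta2 request schemas above pair with the `aptArtifacts:import` and `yumArtifacts:import` methods added earlier in this file. A hedged sketch of invoking the Apt import with this client library follows; the project, location, repository, and bucket names are hypothetical, and because `import` is a Python keyword the generated method is exposed with a trailing underscore.
```
# Sketch: import .deb packages from Cloud Storage into an Apt repository.
from googleapiclient.discovery import build

client = build("artifactregistry", "v1beta2")

# Hypothetical resource names for illustration.
parent = "projects/my-project/locations/us-central1/repositories/my-apt-repo"

operation = (
    client.projects()
    .locations()
    .repositories()
    .aptArtifacts()
    .import_(  # `import` is reserved in Python, hence the underscore.
        parent=parent,
        body={
            "gcsSource": {
                "uris": ["gs://my-bucket/packages/*.deb"],
                "useWildcards": True,
            }
        },
    )
    .execute()
)
# `operation` is a long-running Operation; once done, its response is an
# ImportAptArtifactsResponse listing imported aptArtifacts and any errors.
```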
+ } + }, + "type": "object" + }, + "UploadAptArtifactResponse": { + "description": "The response of the completed artifact upload operation. This response is contained in the Operation and available to users.", + "id": "UploadAptArtifactResponse", + "properties": { + "aptArtifacts": { + "description": "The Apt artifacts updated.", + "items": { + "$ref": "AptArtifact" + }, + "type": "array" + } + }, + "type": "object" + }, + "UploadYumArtifactMediaResponse": { + "description": "The response to upload an artifact.", + "id": "UploadYumArtifactMediaResponse", + "properties": { + "operation": { + "$ref": "Operation", + "description": "Operation to be returned to the user." + } + }, + "type": "object" + }, + "UploadYumArtifactResponse": { + "description": "The response of the completed artifact upload operation. This response is contained in the Operation and available to users.", + "id": "UploadYumArtifactResponse", + "properties": { + "yumArtifacts": { + "description": "The yum artifacts updated.", + "items": { + "$ref": "YumArtifact" + }, + "type": "array" + } + }, + "type": "object" + }, "Version": { "description": "The body of a version resource. A version resource represents a collection of components, such as files and other data. This may correspond to a version in many package management schemes.", "id": "Version", @@ -1507,6 +1799,43 @@ } }, "type": "object" + }, + "YumArtifact": { + "description": "A detailed representation of a Yum artifact.", + "id": "YumArtifact", + "properties": { + "architecture": { + "description": "Output only. Operating system architecture of the artifact.", + "readOnly": true, + "type": "string" + }, + "name": { + "description": "Output only. The Artifact Registry resource name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageName": { + "description": "Output only. The yum package name of the artifact.", + "readOnly": true, + "type": "string" + }, + "packageType": { + "description": "Output only. An artifact is a binary or source package.", + "enum": [ + "PACKAGE_TYPE_UNSPECIFIED", + "BINARY", + "SOURCE" + ], + "enumDescriptions": [ + "Package type is not specified.", + "Binary package (.rpm).", + "Source package (.srpm)."
+ ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/baremetalsolution.v1.json b/googleapiclient/discovery_cache/documents/baremetalsolution.v1.json index 7cf4dd554c9..83c16382249 100644 --- a/googleapiclient/discovery_cache/documents/baremetalsolution.v1.json +++ b/googleapiclient/discovery_cache/documents/baremetalsolution.v1.json @@ -11,7 +11,7 @@ "basePath": "", "baseUrl": "https://baremetalsolution.googleapis.com/", "batchPath": "batch", - "description": "Bare Metal Solution provides hardware to run specialized workloads with low latency on Google Cloud.", + "description": "Provides ways to manage Bare Metal Solution hardware installed in a regional extension located near a Google Cloud data center.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/bare-metal", "fullyEncodeReservedExpansion": true, @@ -145,7 +145,7 @@ } } }, - "revision": "20210525", + "revision": "20210701", "rootUrl": "https://baremetalsolution.googleapis.com/", "schemas": { "ResetInstanceRequest": { diff --git a/googleapiclient/discovery_cache/documents/baremetalsolution.v1alpha1.json b/googleapiclient/discovery_cache/documents/baremetalsolution.v1alpha1.json new file mode 100644 index 00000000000..fecfe4e7fad --- /dev/null +++ b/googleapiclient/discovery_cache/documents/baremetalsolution.v1alpha1.json @@ -0,0 +1,1821 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "See, edit, configure, and delete your Google Cloud Platform data" + } + } + } + }, + "basePath": "", + "baseUrl": "https://baremetalsolution.googleapis.com/", + "batchPath": "batch", + "description": "Provides ways to manage Bare Metal Solution hardware installed in a regional extension located near a Google Cloud data center.", + "discoveryVersion": "v1", + "documentationLink": "https://cloud.google.com/bare-metal", + "fullyEncodeReservedExpansion": true, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "id": "baremetalsolution:v1alpha1", + "kind": "discovery#restDescription", + "mtlsRootUrl": "https://baremetalsolution.mtls.googleapis.com/", + "name": "baremetalsolution", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, + "alt": { + "default": "json", + "description": "Data format for response.", + "enum": [ + "json", + "media", + "proto" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "type": "string" + }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "projects": { + "resources": { + "locations": { + "methods": { + "submitProvisioningConfig": { + "description": "Submit a provisioning configuration for a given project.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}:submitProvisioningConfig", + "httpMethod": "POST", + "id": "baremetalsolution.projects.locations.submitProvisioningConfig", + "parameterOrder": [ + "project", + "location" + ], + "parameters": { + "location": { + "description": "Required. The target location of the provisioning request.", + "location": "path", + "pattern": "^locations/[^/]+$", + "required": true, + "type": "string" + }, + "project": { + "description": "Required. The target project of the provisioning request.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+project}/{+location}:submitProvisioningConfig", + "request": { + "$ref": "SubmitProvisioningConfigRequest" + }, + "response": { + "$ref": "ProvisioningConfig" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "instances": { + "methods": { + "disableInteractiveSerialConsole": { + "description": "Disable the interactive serial console feature on a specific machine.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:disableInteractiveSerialConsole", + "httpMethod": "POST", + "id": "baremetalsolution.projects.locations.instances.disableInteractiveSerialConsole", + "parameterOrder": [ + "instance" + ], + "parameters": { + "instance": { + "description": "Required. Name of the instance to disable the interactive serial console feature on.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+instance}:disableInteractiveSerialConsole", + "request": { + "$ref": "DisableInteractiveSerialConsoleRequest" + }, + "response": { + "$ref": "DisableInteractiveSerialConsoleResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "enableInteractiveSerialConsole": { + "description": "Enable the interactive serial console feature on a specific machine.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:enableInteractiveSerialConsole", + "httpMethod": "POST", + "id": "baremetalsolution.projects.locations.instances.enableInteractiveSerialConsole", + "parameterOrder": [ + "instance" + ], + "parameters": { + "instance": { + "description": "Required.
Name of the instance to enable the interactive serial console feature on.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+instance}:enableInteractiveSerialConsole", + "request": { + "$ref": "EnableInteractiveSerialConsoleRequest" + }, + "response": { + "$ref": "EnableInteractiveSerialConsoleResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Get details for a specific named Instance.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}", + "httpMethod": "GET", + "id": "baremetalsolution.projects.locations.instances.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the Instance to retrieve.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "response": { + "$ref": "Instance" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "List Instances (physical servers).", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/instances", + "httpMethod": "GET", + "id": "baremetalsolution.projects.locations.instances.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous List request, if any.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The location to list Instances in.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/instances", + "response": { + "$ref": "ListInstancesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "readSerialPortOutput": { + "description": "Read the most recent serial port output from a machine.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:readSerialPortOutput", + "httpMethod": "GET", + "id": "baremetalsolution.projects.locations.instances.readSerialPortOutput", + "parameterOrder": [ + "instance" + ], + "parameters": { + "instance": { + "description": "Required. Name of the instance to get serial port output of.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + }, + "startByte": { + "description": "Optional. The start byte of the serial port output to return.", + "format": "int64", + "location": "query", + "type": "string" + } + }, + "path": "v1alpha1/{+instance}:readSerialPortOutput", + "response": { + "$ref": "SerialPortOutput" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "resetInstance": { + "description": "Perform an ungraceful, hard reset on a machine (equivalent to physically turning power off and then back on).", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:resetInstance", + "httpMethod": "POST", + "id": "baremetalsolution.projects.locations.instances.resetInstance", + "parameterOrder": [ + "instance" + ], + "parameters": { + "instance": { + "description": "Required. 
Name of the instance to reset.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+instance}:resetInstance", + "request": { + "$ref": "ResetInstanceRequest" + }, + "response": { + "$ref": "ResetInstanceResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "luns": { + "methods": { + "get": { + "description": "Get details for a specific named Lun.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/luns/{lunsId}", + "httpMethod": "GET", + "id": "baremetalsolution.projects.locations.luns.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the Lun to retrieve.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/luns/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "response": { + "$ref": "Lun" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "List Luns.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/luns", + "httpMethod": "GET", + "id": "baremetalsolution.projects.locations.luns.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous List request, if any.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The location to list Luns in.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/luns", + "response": { + "$ref": "ListLunsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "volumes": { + "methods": { + "get": { + "description": "Get details for a specific named Volume.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/volumes/{volumesId}", + "httpMethod": "GET", + "id": "baremetalsolution.projects.locations.volumes.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the Volume to retrieve.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/volumes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "response": { + "$ref": "Volume" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "List the volumes for the specified project", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/volumes", + "httpMethod": "GET", + "id": "baremetalsolution.projects.locations.volumes.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous List request, if any.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The location to list Volumes in.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/volumes", + "response": { + "$ref": "ListVolumesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Update certain parameters on a Volume.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/volumes/{volumesId}", + "httpMethod": "PATCH", + "id": "baremetalsolution.projects.locations.volumes.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. The name of this Volume.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/volumes/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "The list of fields to update. The only currently supported field is `snapshot_reserved_space_percent`.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "request": { + "$ref": "Volume" + }, + "response": { + "$ref": "Volume" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setVolumeSnapshotSchedulePolicy": { + "description": "Sets the specified snapshot schedule policy on the specified volume.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/volumes/{volumesId}:setVolumeSnapshotSchedulePolicy", + "httpMethod": "POST", + "id": "baremetalsolution.projects.locations.volumes.setVolumeSnapshotSchedulePolicy", + "parameterOrder": [ + "volume" + ], + "parameters": { + "volume": { + "description": "Required. Name of the volume to set snapshot schedule policy on.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/volumes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+volume}:setVolumeSnapshotSchedulePolicy", + "request": { + "$ref": "SetVolumeSnapshotSchedulePolicyRequest" + }, + "response": { + "$ref": "SetVolumeSnapshotSchedulePolicyResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "snapshots": { + "methods": { + "create": { + "description": "Create a snapshot of the specified Volume", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/volumes/{volumesId}/snapshots", + "httpMethod": "POST", + "id": "baremetalsolution.projects.locations.volumes.snapshots.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The Volume containing the VolumeSnapshots.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/volumes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/snapshots", + "request": { + "$ref": "VolumeSnapshot" + }, + "response": { + "$ref": "VolumeSnapshot" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Delete a specific named snapshot.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/volumes/{volumesId}/snapshots/{snapshotsId}", + "httpMethod": "DELETE", + "id": "baremetalsolution.projects.locations.volumes.snapshots.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required.
The name of the snapshot to delete.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/volumes/[^/]+/snapshots/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Get details for a specific named snapshot.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/volumes/{volumesId}/snapshots/{snapshotsId}", + "httpMethod": "GET", + "id": "baremetalsolution.projects.locations.volumes.snapshots.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the snapshot to retrieve.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/volumes/[^/]+/snapshots/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "response": { + "$ref": "VolumeSnapshot" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "List the Snapshots for the specified Volume", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/volumes/{volumesId}/snapshots", + "httpMethod": "GET", + "id": "baremetalsolution.projects.locations.volumes.snapshots.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous List request, if any.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The Volume containing the VolumeSnapshots.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/volumes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/snapshots", + "response": { + "$ref": "ListVolumeSnapshotsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "restore": { + "description": "Restore a VolumeSnapshot.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/volumes/{volumesId}/snapshots/{snapshotsId}:restore", + "httpMethod": "POST", + "id": "baremetalsolution.projects.locations.volumes.snapshots.restore", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
Name of the VolumeSnapshot to restore.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/volumes/[^/]+/snapshots/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+name}:restore", + "request": { + "$ref": "RestoreVolumeSnapshotRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + } + }, + "provisioningQuotas": { + "methods": { + "list": { + "description": "List the budget details to provision resources on a given project.", + "flatPath": "v1alpha1/projects/{projectsId}/provisioningQuotas", + "httpMethod": "GET", + "id": "baremetalsolution.projects.provisioningQuotas.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous List request, if any.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent project containing the provisioning quotas.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/provisioningQuotas", + "response": { + "$ref": "ListProvisioningQuotasResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "snapshotSchedulePolicies": { + "methods": { + "create": { + "description": "Create a SnapshotSchedulePolicy.", + "flatPath": "v1alpha1/projects/{projectsId}/snapshotSchedulePolicies", + "httpMethod": "POST", + "id": "baremetalsolution.projects.snapshotSchedulePolicies.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The parent project containing the SnapshotSchedulePolicy.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/snapshotSchedulePolicies", + "request": { + "$ref": "SnapshotSchedulePolicy" + }, + "response": { + "$ref": "SnapshotSchedulePolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes the named snapshot schedule policy.", + "flatPath": "v1alpha1/projects/{projectsId}/snapshotSchedulePolicies/{snapshotSchedulePoliciesId}", + "httpMethod": "DELETE", + "id": "baremetalsolution.projects.snapshotSchedulePolicies.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the snapshot schedule policy to delete.", + "location": "path", + "pattern": "^projects/[^/]+/snapshotSchedulePolicies/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Get details for a specific snapshot schedule policy.", + "flatPath": "v1alpha1/projects/{projectsId}/snapshotSchedulePolicies/{snapshotSchedulePoliciesId}", + "httpMethod": "GET", + "id": "baremetalsolution.projects.snapshotSchedulePolicies.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required.
The name of the policy to retrieve.", + "location": "path", + "pattern": "^projects/[^/]+/snapshotSchedulePolicies/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "response": { + "$ref": "SnapshotSchedulePolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "List the snapshot schedule policies for the specified project", + "flatPath": "v1alpha1/projects/{projectsId}/snapshotSchedulePolicies", + "httpMethod": "GET", + "id": "baremetalsolution.projects.snapshotSchedulePolicies.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous List request, if any.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent project containing the Snapshot Schedule Policies.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/snapshotSchedulePolicies", + "response": { + "$ref": "ListSnapshotSchedulePoliciesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Update a SnapshotSchedulePolicy.", + "flatPath": "v1alpha1/projects/{projectsId}/snapshotSchedulePolicies/{snapshotSchedulePoliciesId}", + "httpMethod": "PATCH", + "id": "baremetalsolution.projects.snapshotSchedulePolicies.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. The name of this SnapshotSchedulePolicy.", + "location": "path", + "pattern": "^projects/[^/]+/snapshotSchedulePolicies/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "The list of fields to update.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "request": { + "$ref": "SnapshotSchedulePolicy" + }, + "response": { + "$ref": "SnapshotSchedulePolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "sshKeys": { + "methods": { + "create": { + "description": "Create a new SSH key registration in the specified project.", + "flatPath": "v1alpha1/projects/{projectsId}/sshKeys", + "httpMethod": "POST", + "id": "baremetalsolution.projects.sshKeys.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The parent project containing the SSH keys.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + }, + "sshKeyId": { + "description": "Required. The ID to use for the key, which will become the final component of the key's resource name. This value should match the regex: [a-zA-Z0-9@.\\-_]{1,64}", + "location": "query", + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/sshKeys", + "request": { + "$ref": "SSHKey" + }, + "response": { + "$ref": "SSHKey" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Delete an SSH key registration in the specified project.", + "flatPath": "v1alpha1/projects/{projectsId}/sshKeys/{sshKeysId}", + "httpMethod": "DELETE", + "id": "baremetalsolution.projects.sshKeys.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required.
The name of the SSH key to delete.", + "location": "path", + "pattern": "^projects/[^/]+/sshKeys/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "List the public SSH keys registered for the specified project.", + "flatPath": "v1alpha1/projects/{projectsId}/sshKeys", + "httpMethod": "GET", + "id": "baremetalsolution.projects.sshKeys.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous List request, if any.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent project containing the SSH keys.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/sshKeys", + "response": { + "$ref": "ListSSHKeysResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + }, + "revision": "20210701", + "rootUrl": "https://baremetalsolution.googleapis.com/", + "schemas": { + "DisableInteractiveSerialConsoleRequest": { + "description": "Request for DisableInteractiveSerialConsole.", + "id": "DisableInteractiveSerialConsoleRequest", + "properties": {}, + "type": "object" + }, + "DisableInteractiveSerialConsoleResponse": { + "description": "Response for DisableInteractiveSerialConsole.", + "id": "DisableInteractiveSerialConsoleResponse", + "properties": {}, + "type": "object" + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", + "id": "Empty", + "properties": {}, + "type": "object" + }, + "EnableInteractiveSerialConsoleRequest": { + "description": "Request for EnableInteractiveSerialConsole.", + "id": "EnableInteractiveSerialConsoleRequest", + "properties": {}, + "type": "object" + }, + "EnableInteractiveSerialConsoleResponse": { + "description": "Response for EnableInteractiveSerialConsole.", + "id": "EnableInteractiveSerialConsoleResponse", + "properties": {}, + "type": "object" + }, + "Instance": { + "description": "An Instance.", + "id": "Instance", + "properties": { + "hyperthreadingEnabled": { + "description": "Is hyperthreading enabled for this instance?", + "type": "boolean" + }, + "luns": { + "description": "The Luns attached to this instance", + "items": { + "$ref": "Lun" + }, + "type": "array" + }, + "name": { + "description": "Output only. 
The name of this Instance.", + "readOnly": true, + "type": "string" + }, + "scheduledPowerResetTime": { + "description": "The scheduled power reset time.", + "format": "google-datetime", + "type": "string" + }, + "sshEnabled": { + "description": "Is SSH enabled for this instance?", + "type": "boolean" + }, + "state": { + "description": "The state of this Instance.", + "enum": [ + "STATE_UNSPECIFIED", + "RESERVED", + "PROVISIONING", + "PROVISIONED", + "DEPROVISIONING", + "DEPROVISIONED", + "UPDATING" + ], + "enumDescriptions": [ + "The unspecified state.", + "The Instance has been reserved.", + "The Instance is being provisioned.", + "The Instance has been provisioned.", + "The Instance is being deprovisioned.", + "The Instance has been deprovisioned.", + "The Instance is being updated." + ], + "type": "string" + } + }, + "type": "object" + }, + "InstanceConfig": { + "description": "Configuration parameters for a new instance.", + "id": "InstanceConfig", + "properties": { + "clientNetwork": { + "$ref": "NetworkAddress", + "description": "Client network address." + }, + "hyperthreading": { + "description": "Whether the instance should be provisioned with Hyperthreading enabled.", + "type": "boolean" + }, + "id": { + "description": "A transient unique identifier to identify an instance within a ProvisioningConfig request.", + "type": "string" + }, + "instanceType": { + "description": "Instance type.", + "type": "string" + }, + "location": { + "description": "Location where to deploy the instance.", + "type": "string" + }, + "osImage": { + "description": "OS image to initialize the instance.", + "type": "string" + }, + "privateNetwork": { + "$ref": "NetworkAddress", + "description": "Private network address, if any." + } + }, + "type": "object" + }, + "InstanceQuota": { + "description": "A resource budget.", + "id": "InstanceQuota", + "properties": { + "availableMachineCount": { + "description": "Number of machines that can be created for the given location and instance_type.", + "format": "int32", + "type": "integer" + }, + "instanceType": { + "description": "Instance type.", + "type": "string" + }, + "location": { + "description": "Location where the quota applies.", + "type": "string" + } + }, + "type": "object" + }, + "ListInstancesResponse": { + "description": "Response for ListInstances.", + "id": "ListInstancesResponse", + "properties": { + "instances": { + "description": "The Instances in this project.", + "items": { + "$ref": "Instance" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", + "type": "string" + } + }, + "type": "object" + }, + "ListLunsResponse": { + "description": "Response for ListLuns.", + "id": "ListLunsResponse", + "properties": { + "luns": { + "description": "The Luns in this project.", + "items": { + "$ref": "Lun" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", + "type": "string" + } + }, + "type": "object" + }, + "ListProvisioningQuotasResponse": { + "description": "Response for ListProvisioningQuotas.", + "id": "ListProvisioningQuotasResponse", + "properties": { + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", + "type": "string" + }, + "provisioningQuotas": { + "description": "The provisioning quotas registered in this project.", + "items": { + "$ref":
"ProvisioningQuota" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListSSHKeysResponse": { + "description": "Response for ListSSHKeys.", + "id": "ListSSHKeysResponse", + "properties": { + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", + "type": "string" + }, + "sshKeys": { + "description": "The SSH keys registered in this project.", + "items": { + "$ref": "SSHKey" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListSnapshotSchedulePoliciesResponse": { + "description": "Response for ListSnapshotSchedulePolicies.", + "id": "ListSnapshotSchedulePoliciesResponse", + "properties": { + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", + "type": "string" + }, + "snapshotSchedulePolicies": { + "description": "The snapshot schedule policies registered in this project.", + "items": { + "$ref": "SnapshotSchedulePolicy" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListVolumeSnapshotsResponse": { + "description": "Response for ListVolumeSnapshots.", + "id": "ListVolumeSnapshotsResponse", + "properties": { + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", + "type": "string" + }, + "volumeSnapshots": { + "description": "The VolumeSnapshots for the volume.", + "items": { + "$ref": "VolumeSnapshot" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListVolumesResponse": { + "description": "Response for ListVolumes.", + "id": "ListVolumesResponse", + "properties": { + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", + "type": "string" + }, + "volumes": { + "description": "The volumes registered in this project.", + "items": { + "$ref": "Volume" + }, + "type": "array" + } + }, + "type": "object" + }, + "Lun": { + "description": "A storage Lun.", + "id": "Lun", + "properties": { + "isBoot": { + "description": "Whether this Lun is a boot Lun.", + "type": "boolean" + }, + "multiprotocolType": { + "description": "The multiprotocol type of this Lun.", + "type": "string" + }, + "name": { + "description": "Output only. The name of this Lun.", + "readOnly": true, + "type": "string" + }, + "remoteVolume": { + "$ref": "Volume", + "description": "The storage volume that this Lun is attached to." + }, + "shareable": { + "description": "Whether this Lun is allowed to be shared between multiple physical servers.", + "type": "boolean" + }, + "sizeGb": { + "description": "The size of this Lun, in gigabytes.", + "format": "int64", + "type": "string" + }, + "state": { + "description": "The state of this Lun.", + "enum": [ + "STATE_UNSPECIFIED", + "RESERVED", + "PROVISIONING", + "PROVISIONED", + "DEPROVISIONING", + "DEPROVISIONED", + "UPDATING" + ], + "enumDescriptions": [ + "The unspecified state.", + "The Lun has been reserved.", + "The Lun is being provisioned.", + "The Lun has been provisioned.", + "The Lun is being deprovisioned.", + "The Lun has been deprovisioned.", + "The Lun is being updated." 
+ ], + "type": "string" + } + }, + "type": "object" + }, + "LunRange": { + "description": "A LUN range.", + "id": "LunRange", + "properties": { + "quantity": { + "description": "Number of LUNs to create.", + "format": "int32", + "type": "integer" + }, + "sizeGb": { + "description": "The requested size of each LUN, in GB.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "NetworkAddress": { + "description": "A network.", + "id": "NetworkAddress", + "properties": { + "address": { + "description": "IP address to be assigned to the server.", + "type": "string" + }, + "networkId": { + "description": "Id of the network to use, within the same ProvisioningConfig request.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkConfig": { + "description": "Configuration parameters for a new network.", + "id": "NetworkConfig", + "properties": { + "bandwidth": { + "description": "Interconnect bandwidth. Set only when type is CLIENT.", + "enum": [ + "BANDWIDTH_UNSPECIFIED", + "BW_1_GBPS", + "BW_2_GBPS", + "BW_5_GBPS", + "BW_10_GBPS" + ], + "enumDescriptions": [ + "Unspecified value.", + "1 Gbps.", + "2 Gbps.", + "5 Gbps.", + "10 Gbps." + ], + "type": "string" + }, + "cidr": { + "description": "CIDR range of the network.", + "type": "string" + }, + "id": { + "description": "A transient unique identifier to identify a network within a ProvisioningConfig request.", + "type": "string" + }, + "location": { + "description": "Location where to deploy the network.", + "type": "string" + }, + "serviceCidr": { + "description": "Service CIDR, if any.", + "enum": [ + "SERVICE_CIDR_UNSPECIFIED", + "DISABLED", + "HIGH_26", + "HIGH_27", + "HIGH_28" + ], + "enumDescriptions": [ + "Unspecified value.", + "Services are disabled for the given network.", + "Use the highest /26 block of the network to host services.", + "Use the highest /27 block of the network to host services.", + "Use the highest /28 block of the network to host services." + ], + "type": "string" + }, + "type": { + "description": "The type of this network.", + "enum": [ + "TYPE_UNSPECIFIED", + "CLIENT", + "PRIVATE" + ], + "enumDescriptions": [ + "Unspecified value.", + "Client network, that is, a network peered to a GCP VPC.", + "Private network, that is, a network local to the BMS POD." + ], + "type": "string" + }, + "vlanAttachments": { + "description": "List of VLAN attachments. As of now there are always 2 attachments, but this is going to change in the future (multi-VLAN).", + "items": { + "$ref": "VlanAttachment" + }, + "type": "array" + } + }, + "type": "object" + }, + "NfsExport": { + "description": "An NFS export entry.", + "id": "NfsExport", + "properties": { + "allowDev": { + "description": "Allow dev.", + "type": "boolean" + }, + "allowSuid": { + "description": "Allow the setuid flag.", + "type": "boolean" + }, + "cidr": { + "description": "A CIDR range.", + "type": "string" + }, + "machineId": { + "description": "A single machine, identified by an ID.", + "type": "string" + }, + "networkId": { + "description": "Network to use to publish the export.", + "type": "string" + }, + "noRootSquash": { + "description": "Disable root squashing.", + "type": "boolean" + }, + "permissions": { + "description": "Export permissions.", + "enum": [ + "PERMISSIONS_UNSPECIFIED", + "READ_ONLY", + "READ_WRITE" + ], + "enumDescriptions": [ + "Unspecified value.", + "Read-only permission.", + "Read-write permission."
+ ], + "type": "string" + } + }, + "type": "object" + }, + "ProvisioningConfig": { + "description": "A provisioning configuration.", + "id": "ProvisioningConfig", + "properties": { + "instances": { + "description": "Instances to be created.", + "items": { + "$ref": "InstanceConfig" + }, + "type": "array" + }, + "networks": { + "description": "Networks to be created.", + "items": { + "$ref": "NetworkConfig" + }, + "type": "array" + }, + "ticketId": { + "description": "A reference to track the request.", + "type": "string" + }, + "volumes": { + "description": "Volumes to be created.", + "items": { + "$ref": "VolumeConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "ProvisioningQuota": { + "description": "A provisioning quota for a given project.", + "id": "ProvisioningQuota", + "properties": { + "instanceQuota": { + "$ref": "InstanceQuota", + "description": "Instance quota." + } + }, + "type": "object" + }, + "ResetInstanceRequest": { + "description": "Request for ResetInstance.", + "id": "ResetInstanceRequest", + "properties": {}, + "type": "object" + }, + "ResetInstanceResponse": { + "description": "Response for ResetInstance.", + "id": "ResetInstanceResponse", + "properties": {}, + "type": "object" + }, + "RestoreVolumeSnapshotRequest": { + "description": "Request for RestoreVolumeSnapshot.", + "id": "RestoreVolumeSnapshotRequest", + "properties": {}, + "type": "object" + }, + "SSHKey": { + "description": "A public SSH key registered in the project. Used primarily for the interactive serial console feature.", + "id": "SSHKey", + "properties": { + "name": { + "description": "Output only. The name of this SSH key.", + "readOnly": true, + "type": "string" + }, + "publicKey": { + "description": "The public SSH key.", + "type": "string" + } + }, + "type": "object" + }, + "Schedule": { + "description": "A snapshot schedule.", + "id": "Schedule", + "properties": { + "crontabSpec": { + "description": "The crontab-like specification that this Schedule will use to take snapshots.", + "type": "string" + }, + "prefix": { + "description": "A string to prefix names of snapshots created under this Schedule.", + "type": "string" + }, + "retentionCount": { + "description": "The maximum number of snapshots to retain under this Schedule.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "SerialPortOutput": { + "description": "Response for ReadSerialPortOutput.", + "id": "SerialPortOutput", + "properties": { + "contents": { + "description": "The serial port output.", + "type": "string" + }, + "nextStartByte": { + "description": "The byte index to use in a subsequent call to ReadSerialPortOutput to get more output.", + "format": "int64", + "type": "string" + }, + "start": { + "description": "The start byte index of the included contents.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "SetVolumeSnapshotSchedulePolicyRequest": { + "description": "Request for SetVolumeSnapshotSchedulePolicy.", + "id": "SetVolumeSnapshotSchedulePolicyRequest", + "properties": { + "snapshotSchedulePolicy": { + "description": "Required.
The name of the policy to set on the volume.", + "type": "string" + } + }, + "type": "object" + }, + "SetVolumeSnapshotSchedulePolicyResponse": { + "description": "Response for SetVolumeSnapshotSchedulePolicy.", + "id": "SetVolumeSnapshotSchedulePolicyResponse", + "properties": {}, + "type": "object" + }, + "SnapshotSchedulePolicy": { + "description": "A snapshot schedule policy.", + "id": "SnapshotSchedulePolicy", + "properties": { + "description": { + "description": "The description of this SnapshotSchedulePolicy.", + "type": "string" + }, + "name": { + "description": "Output only. The name of this SnapshotSchedulePolicy.", + "readOnly": true, + "type": "string" + }, + "schedules": { + "description": "The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified.", + "items": { + "$ref": "Schedule" + }, + "type": "array" + }, + "volumes": { + "description": "The names of the Volumes this policy is associated with.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "SubmitProvisioningConfigRequest": { + "description": "Request for SubmitProvisioningConfig.", + "id": "SubmitProvisioningConfigRequest", + "properties": { + "provisioningConfig": { + "$ref": "ProvisioningConfig", + "description": "Required. The ProvisioningConfig to submit." + } + }, + "type": "object" + }, + "VlanAttachment": { + "description": "A GCP VLAN attachment.", + "id": "VlanAttachment", + "properties": { + "id": { + "description": "Identifier of the VLAN attachment.", + "type": "string" + }, + "pairingKey": { + "description": "Attachment pairing key.", + "type": "string" + } + }, + "type": "object" + }, + "Volume": { + "description": "Volume registered in the project.", + "id": "Volume", + "properties": { + "autoGrownSizeGb": { + "description": "The amount, in GB, by which this Volume has expanded as a result of an auto grow policy.", + "format": "int64", + "type": "string" + }, + "currentSizeGb": { + "description": "The current size of this Volume, in GB, including space reserved for snapshots. This size may be different from the requested size if the Volume has been configured with auto grow or auto shrink.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "Output only. The name of this Volume.", + "readOnly": true, + "type": "string" + }, + "remainingSpaceGb": { + "description": "The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots.", + "format": "int64", + "type": "string" + }, + "requestedSizeGb": { + "description": "The requested size of this Volume, in GB.", + "format": "int64", + "type": "string" + }, + "snapshotReservedSpacePercent": { + "description": "The percent of space on this Volume reserved for snapshots.", + "format": "int32", + "type": "integer" + }, + "snapshotReservedSpaceRemainingGb": { + "description": "The amount, in GB, of space available in this Volume's reserved snapshot space.", + "format": "int64", + "type": "string" + }, + "snapshotReservedSpaceUsedPercent": { + "description": "The percent of reserved snapshot space on this Volume that is actually used by snapshot copies.
This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume.", + "format": "int32", + "type": "integer" + }, + "state": { + "description": "The state of this Volume.", + "enum": [ + "STATE_UNSPECIFIED", + "PROVISIONED", + "DEPROVISIONING_REQUESTED", + "DEPROVISIONING_COOLOFF" + ], + "enumDescriptions": [ + "The unspecified state.", + "The Volume has been provisioned.", + "The Volume has been requested to be deprovisioned.", + "The Volume is in deprovisioning cooloff: a deprovisioning has been requested, but data has not yet been deleted." + ], + "type": "string" + }, + "type": { + "description": "The type of this Volume.", + "enum": [ + "TYPE_UNSPECIFIED", + "FLASH", + "DISK" + ], + "enumDescriptions": [ + "The unspecified type.", + "This Volume is on flash.", + "This Volume is on disk." + ], + "type": "string" + } + }, + "type": "object" + }, + "VolumeConfig": { + "description": "Configuration parameters for a new volume.", + "id": "VolumeConfig", + "properties": { + "id": { + "description": "A transient unique identifier to identify a volume within a ProvisioningConfig request.", + "type": "string" + }, + "location": { + "description": "Location where to deploy the volume.", + "type": "string" + }, + "lunRanges": { + "description": "LUN ranges to be configured. Set only when protocol is PROTOCOL_FC.", + "items": { + "$ref": "LunRange" + }, + "type": "array" + }, + "machineIds": { + "description": "Machine IDs connected to this volume. Set only when protocol is PROTOCOL_FC.", + "items": { + "type": "string" + }, + "type": "array" + }, + "nfsExports": { + "description": "NFS exports. Set only when protocol is PROTOCOL_NFS.", + "items": { + "$ref": "NfsExport" + }, + "type": "array" + }, + "protocol": { + "description": "Volume protocol.", + "enum": [ + "PROTOCOL_UNSPECIFIED", + "PROTOCOL_FC", + "PROTOCOL_NFS" + ], + "enumDescriptions": [ + "Unspecified value.", + "Fibre channel.", + "Network file system." + ], + "type": "string" + }, + "sizeGb": { + "description": "The requested size of this volume, in GB. This will be updated in a later iteration with a generic size field.", + "format": "int32", + "type": "integer" + }, + "snapshotsEnabled": { + "description": "Whether snapshots should be enabled.", + "type": "boolean" + }, + "type": { + "description": "The type of this Volume.", + "enum": [ + "TYPE_UNSPECIFIED", + "FLASH", + "DISK" + ], + "enumDescriptions": [ + "The unspecified type.", + "This Volume is on flash.", + "This Volume is on disk." + ], + "type": "string" + } + }, + "type": "object" + }, + "VolumeSnapshot": { + "description": "VolumeSnapshot registered for a given Volume.", + "id": "VolumeSnapshot", + "properties": { + "description": { + "description": "The description of this Snapshot.", + "type": "string" + }, + "name": { + "description": "Output only. The name of this Snapshot.", + "readOnly": true, + "type": "string" + }, + "sizeBytes": { + "description": "The real size of this Snapshot, in bytes.", + "format": "int64", + "type": "string" + }, + "state": { + "description": "The state of this Snapshot.", + "enum": [ + "STATE_UNSPECIFIED", + "CREATING", + "ACTIVE" + ], + "enumDescriptions": [ + "The unspecified state.", + "The Snapshot is currently being created.", + "The Snapshot has been created, and can be used to restore."
+ ], + "type": "string" + } + }, + "type": "object" + } + }, + "servicePath": "", + "title": "Bare Metal Solution API", + "version": "v1alpha1", + "version_module": true +} \ No newline at end of file diff --git a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json index 442698c2ad3..2480c6d02dc 100644 --- a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json @@ -395,7 +395,7 @@ } } }, - "revision": "20210603", + "revision": "20210617", "rootUrl": "https://bigqueryconnection.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json index d1422cc66e7..30117f64196 100644 --- a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json +++ b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json @@ -1280,7 +1280,7 @@ } } }, - "revision": "20210617", + "revision": "20210622", "rootUrl": "https://bigquerydatatransfer.googleapis.com/", "schemas": { "CheckValidCredsRequest": { diff --git a/googleapiclient/discovery_cache/documents/bigtableadmin.v1.json b/googleapiclient/discovery_cache/documents/bigtableadmin.v1.json index db59dcd8054..e0bbe8811ae 100644 --- a/googleapiclient/discovery_cache/documents/bigtableadmin.v1.json +++ b/googleapiclient/discovery_cache/documents/bigtableadmin.v1.json @@ -96,7 +96,7 @@ }, "protocol": "rest", "resources": {}, - "revision": "20210521", + "revision": "20210616", "rootUrl": "https://bigtableadmin.googleapis.com/", "schemas": { "Backup": { diff --git a/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json b/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json index e45cc0b4b87..f2d00ad80da 100644 --- a/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json +++ b/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json @@ -1803,7 +1803,7 @@ } } }, - "revision": "20210521", + "revision": "20210616", "rootUrl": "https://bigtableadmin.googleapis.com/", "schemas": { "AppProfile": { diff --git a/googleapiclient/discovery_cache/documents/billingbudgets.v1.json b/googleapiclient/discovery_cache/documents/billingbudgets.v1.json index e7613b47a47..29c593b304e 100644 --- a/googleapiclient/discovery_cache/documents/billingbudgets.v1.json +++ b/googleapiclient/discovery_cache/documents/billingbudgets.v1.json @@ -270,7 +270,7 @@ } } }, - "revision": "20210621", + "revision": "20210625", "rootUrl": "https://billingbudgets.googleapis.com/", "schemas": { "GoogleCloudBillingBudgetsV1Budget": { diff --git a/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json b/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json index f7a9bcaa108..48ff498b3c8 100644 --- a/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json @@ -264,7 +264,7 @@ } } }, - "revision": "20210621", + "revision": "20210625", "rootUrl": "https://billingbudgets.googleapis.com/", "schemas": { "GoogleCloudBillingBudgetsV1beta1AllUpdatesRule": { diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json index d44a678008a..2bb957e6cec 100644 --- 
a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json +++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json @@ -551,7 +551,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://binaryauthorization.googleapis.com/", "schemas": { "AdmissionRule": { diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json index 61576e1ff07..4cb4894368c 100644 --- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json @@ -551,7 +551,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://binaryauthorization.googleapis.com/", "schemas": { "AdmissionRule": { diff --git a/googleapiclient/discovery_cache/documents/blogger.v2.json b/googleapiclient/discovery_cache/documents/blogger.v2.json index 0e6d522ab6b..2e194c84c14 100644 --- a/googleapiclient/discovery_cache/documents/blogger.v2.json +++ b/googleapiclient/discovery_cache/documents/blogger.v2.json @@ -401,7 +401,7 @@ } } }, - "revision": "20210625", + "revision": "20210705", "rootUrl": "https://blogger.googleapis.com/", "schemas": { "Blog": { diff --git a/googleapiclient/discovery_cache/documents/blogger.v3.json b/googleapiclient/discovery_cache/documents/blogger.v3.json index ac2c690fc8e..614519a0e68 100644 --- a/googleapiclient/discovery_cache/documents/blogger.v3.json +++ b/googleapiclient/discovery_cache/documents/blogger.v3.json @@ -1678,7 +1678,7 @@ } } }, - "revision": "20210625", + "revision": "20210705", "rootUrl": "https://blogger.googleapis.com/", "schemas": { "Blog": { diff --git a/googleapiclient/discovery_cache/documents/books.v1.json b/googleapiclient/discovery_cache/documents/books.v1.json index 118e541849e..a16650fe359 100644 --- a/googleapiclient/discovery_cache/documents/books.v1.json +++ b/googleapiclient/discovery_cache/documents/books.v1.json @@ -2671,7 +2671,7 @@ } } }, - "revision": "20210623", + "revision": "20210625", "rootUrl": "https://books.googleapis.com/", "schemas": { "Annotation": { diff --git a/googleapiclient/discovery_cache/documents/calendar.v3.json b/googleapiclient/discovery_cache/documents/calendar.v3.json index e22a5fe0fc1..0adcc47bd54 100644 --- a/googleapiclient/discovery_cache/documents/calendar.v3.json +++ b/googleapiclient/discovery_cache/documents/calendar.v3.json @@ -1723,7 +1723,7 @@ } } }, - "revision": "20210622", + "revision": "20210628", "rootUrl": "https://www.googleapis.com/", "schemas": { "Acl": { diff --git a/googleapiclient/discovery_cache/documents/chat.v1.json b/googleapiclient/discovery_cache/documents/chat.v1.json index 7a0e6432266..bcc886db266 100644 --- a/googleapiclient/discovery_cache/documents/chat.v1.json +++ b/googleapiclient/discovery_cache/documents/chat.v1.json @@ -601,7 +601,7 @@ } } }, - "revision": "20210618", + "revision": "20210626", "rootUrl": "https://chat.googleapis.com/", "schemas": { "ActionParameter": { @@ -2142,6 +2142,7 @@ "createTime": { "description": "Output only. 
The time at which the message was created in Hangouts Chat server.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "fallbackText": { diff --git a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json index 21265f5c488..2ba1f301db3 100644 --- a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json @@ -288,7 +288,7 @@ } } }, - "revision": "20210623", + "revision": "20210701", "rootUrl": "https://chromemanagement.googleapis.com/", "schemas": { "GoogleChromeManagementV1BrowserVersion": { diff --git a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json index c538d3682b0..a6b834d9cc3 100644 --- a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json +++ b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json @@ -324,7 +324,7 @@ } } }, - "revision": "20210625", + "revision": "20210701", "rootUrl": "https://chromepolicy.googleapis.com/", "schemas": { "GoogleChromePolicyV1AdditionalTargetKeyName": { diff --git a/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json b/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json index 4cde78f4d4b..22b8a87a170 100644 --- a/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json +++ b/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json @@ -116,7 +116,7 @@ } } }, - "revision": "20210624", + "revision": "20210701", "rootUrl": "https://chromeuxreport.googleapis.com/", "schemas": { "Bin": { diff --git a/googleapiclient/discovery_cache/documents/civicinfo.v2.json b/googleapiclient/discovery_cache/documents/civicinfo.v2.json index 234ac53affc..7f2560f86d6 100644 --- a/googleapiclient/discovery_cache/documents/civicinfo.v2.json +++ b/googleapiclient/discovery_cache/documents/civicinfo.v2.json @@ -234,7 +234,8 @@ "highestCourtJudge", "judge", "schoolBoard", - "specialPurposeOfficer" + "specialPurposeOfficer", + "otherRole" ], "enumDescriptions": [ "", @@ -247,6 +248,7 @@ "", "", "", + "", "" ], "location": "query", @@ -320,7 +322,8 @@ "highestCourtJudge", "judge", "schoolBoard", - "specialPurposeOfficer" + "specialPurposeOfficer", + "otherRole" ], "enumDescriptions": [ "", @@ -333,6 +336,7 @@ "", "", "", + "", "" ], "location": "query", @@ -348,7 +352,7 @@ } } }, - "revision": "20210616", + "revision": "20210629", "rootUrl": "https://civicinfo.googleapis.com/", "schemas": { "AdministrationRegion": { @@ -647,7 +651,8 @@ "highestCourtJudge", "judge", "schoolBoard", - "specialPurposeOfficer" + "specialPurposeOfficer", + "otherRole" ], "enumDescriptions": [ "", @@ -660,6 +665,7 @@ "", "", "", + "", "" ], "type": "string" @@ -934,7 +940,8 @@ "highestCourtJudge", "judge", "schoolBoard", - "specialPurposeOfficer" + "specialPurposeOfficer", + "otherRole" ], "enumDescriptions": [ "", @@ -947,6 +954,7 @@ "", "", "", + "", "" ], "type": "string" diff --git a/googleapiclient/discovery_cache/documents/classroom.v1.json b/googleapiclient/discovery_cache/documents/classroom.v1.json index f1f7c72c09a..0425d1e0abe 100644 --- a/googleapiclient/discovery_cache/documents/classroom.v1.json +++ b/googleapiclient/discovery_cache/documents/classroom.v1.json @@ -2400,7 +2400,7 @@ } } }, - "revision": "20210623", + "revision": "20210629", "rootUrl": "https://classroom.googleapis.com/", "schemas": { "Announcement": { diff --git 
a/googleapiclient/discovery_cache/documents/cloudasset.v1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1.json index 1dac0e2920e..80e0705d79f 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1.json @@ -686,6 +686,12 @@ "location": "query", "type": "string" }, + "readMask": { + "description": "Optional. A comma-separated list of fields specifying which fields to be returned in ResourceSearchResult. Only '*' or combination of top level fields can be specified. Field names of both snake_case and camelCase are supported. Examples: `\"*\"`, `\"name,location\"`, `\"name,versionedResources\"`. The read_mask paths must be valid field paths listed but not limited to (both snake_case and camelCase are supported): * name * asset_type or assetType * project * display_name or displayName * description * location * labels * network_tags or networkTags * kms_key or kmsKey * create_time or createTime * update_time or updateTime * state * additional_attributes or additionalAttributes * versioned_resources or versionedResources If read_mask is not specified, all fields except versionedResources will be returned. If only '*' is specified, all fields including versionedResources will be returned. Any invalid field path will trigger INVALID_ARGUMENT error.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + }, "scope": { "description": "Required. A scope can be a project, a folder, or an organization. The search is limited to the resources within the `scope`. The caller must be granted the [`cloudasset.assets.searchAllResources`](https://cloud.google.com/asset-inventory/docs/access-control#required_permissions) permission on the desired scope. The allowed values are: * projects/{PROJECT_ID} (e.g., \"projects/foo-bar\") * projects/{PROJECT_NUMBER} (e.g., \"projects/12345678\") * folders/{FOLDER_NUMBER} (e.g., \"folders/1234567\") * organizations/{ORGANIZATION_NUMBER} (e.g., \"organizations/123456\")", "location": "path", @@ -705,7 +711,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AccessSelector": { @@ -2801,6 +2807,13 @@ "description": "The last update timestamp of this resource, at which the resource was last modified or deleted. The granularity is in seconds. Timestamp.nanos will always be 0. This field is available only when the resource's proto contains it. To search against `update_time`: * use a field query. - value in seconds since unix epoch. Example: `updateTime < 1609459200` - value in date string. Example: `updateTime < 2021-01-01` - value in date-time string (must be quoted). Example: `updateTime < \"2021-01-01T00:00:00\"`", "format": "google-datetime", "type": "string" + }, + "versionedResources": { + "description": "Versioned resource representations of this resource. This is repeated because there could be multiple versions of resource representations during version migration. This `versioned_resources` field is not searchable. 
Some attributes of the resource representations are exposed in `additional_attributes` field, so as to allow users to search on them.", + "items": { + "$ref": "VersionedResource" + }, + "type": "array" } }, "type": "object" @@ -3011,6 +3024,25 @@ }, "type": "object" }, + "VersionedResource": { + "description": "Resource representation as defined by the corresponding service providing the resource for a given API version.", + "id": "VersionedResource", + "properties": { + "resource": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "JSON representation of the resource as defined by the corresponding service providing this resource. Example: If the resource is an instance provided by Compute Engine, this field will contain the JSON representation of the instance as defined by Compute Engine: `https://cloud.google.com/compute/docs/reference/rest/v1/instances`. You can find the resource definition for each supported resource type in this table: `https://cloud.google.com/asset-inventory/docs/supported-asset-types#searchable_asset_types`", + "type": "object" + }, + "version": { + "description": "API version of the resource. Example: If the resource is an instance provided by Compute Engine v1 API as defined in `https://cloud.google.com/compute/docs/reference/rest/v1/instances`, version will be \"v1\".", + "type": "string" + } + }, + "type": "object" + }, "WindowsQuickFixEngineeringPackage": { "description": "Information related to a Quick Fix Engineering package. Fields are taken from Windows QuickFixEngineering Interface and match the source names: https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-quickfixengineering", "id": "WindowsQuickFixEngineeringPackage", diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json index cd032ed717d..82c866c39ae 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json @@ -411,7 +411,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningResponse": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json index adc158f90f8..92519a47f76 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json @@ -207,7 +207,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningResponse": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json index 0c9d6d40c1a..13ade5dd316 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json @@ -221,7 +221,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AccessSelector": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json index fa11eab147f..3a584fe8b4b 100644 --- 
a/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json @@ -177,7 +177,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningResponse": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json index cb029dcc0f3..d3da6a37e29 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json @@ -167,7 +167,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningResponse": { diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v1.json b/googleapiclient/discovery_cache/documents/cloudbuild.v1.json index e618e43a0a2..3108bc5b2f4 100644 --- a/googleapiclient/discovery_cache/documents/cloudbuild.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudbuild.v1.json @@ -835,6 +835,188 @@ } } } + }, + "workerPools": { + "methods": { + "create": { + "description": "Creates a `WorkerPool`.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workerPools", + "httpMethod": "POST", + "id": "cloudbuild.projects.locations.workerPools.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The parent resource where this worker pool will be created. Format: `projects/{project}/locations/{location}`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "validateOnly": { + "description": "If set, validate the request and preview the response, but do not actually post it.", + "location": "query", + "type": "boolean" + }, + "workerPoolId": { + "description": "Required. Immutable. The ID to use for the `WorkerPool`, which will become the final component of the resource name. This value should be 1-63 characters, and valid characters are /a-z-/.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/workerPools", + "request": { + "$ref": "WorkerPool" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a `WorkerPool`.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workerPools/{workerPoolsId}", + "httpMethod": "DELETE", + "id": "cloudbuild.projects.locations.workerPools.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "allowMissing": { + "description": "If set to true, and the `WorkerPool` is not found, the request will succeed but no action will be taken on the server.", + "location": "query", + "type": "boolean" + }, + "etag": { + "description": "Optional. If this is provided, it must match the server's etag on the workerpool for the request to be processed.", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The name of the `WorkerPool` to delete. 
Format: `projects/{project}/locations/{location}/workerPools/{workerPool}`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workerPools/[^/]+$", + "required": true, + "type": "string" + }, + "validateOnly": { + "description": "If set, validate the request and preview the response, but do not actually post it.", + "location": "query", + "type": "boolean" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Returns details of a `WorkerPool`.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workerPools/{workerPoolsId}", + "httpMethod": "GET", + "id": "cloudbuild.projects.locations.workerPools.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the `WorkerPool` to retrieve. Format: `projects/{project}/locations/{location}/workerPools/{workerPool}`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workerPools/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "WorkerPool" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists `WorkerPool`s.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workerPools", + "httpMethod": "GET", + "id": "cloudbuild.projects.locations.workerPools.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of `WorkerPool`s to return. The service may return fewer than this value. If omitted, the server will use a sensible default.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A page token, received from a previous `ListWorkerPools` call. Provide this to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent of the collection of `WorkerPools`. Format: `projects/{project}/locations/{location}`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/workerPools", + "response": { + "$ref": "ListWorkerPoolsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a `WorkerPool`.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workerPools/{workerPoolsId}", + "httpMethod": "PATCH", + "id": "cloudbuild.projects.locations.workerPools.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`.
The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workerPools/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "A mask specifying which fields in `worker_pool` to update.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + }, + "validateOnly": { + "description": "If set, validate the request and preview the response, but do not actually post it.", + "location": "query", + "type": "boolean" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "WorkerPool" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, @@ -1103,7 +1285,7 @@ } } }, - "revision": "20210613", + "revision": "20210701", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "ArtifactObjects": { @@ -1425,6 +1607,10 @@ ], "type": "string" }, + "pool": { + "$ref": "PoolOption", + "description": "Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information." + }, "requestedVerifyOption": { "description": "Requested verifiability options.", "enum": [ @@ -1481,7 +1667,7 @@ "type": "array" }, "workerPool": { - "description": "Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users.", + "description": "This field is deprecated; please use `pool.name` instead.", "type": "string" } }, @@ -1591,6 +1777,10 @@ "description": "Configuration for an automated build in response to source repository changes.", "id": "BuildTrigger", "properties": { + "autodetect": { + "description": "Autodetect build configuration. The following precedence is used (case insensitive): 1. cloudbuild.yaml 2. cloudbuild.yml 3. cloudbuild.json 4. Dockerfile Currently only available for GitHub App Triggers.", + "type": "boolean" + }, "build": { "$ref": "Build", "description": "Contents of the build template." @@ -1722,6 +1912,48 @@ "properties": {}, "type": "object" }, + "CreateWorkerPoolOperationMetadata": { + "description": "Metadata for the `CreateWorkerPool` operation.", + "id": "CreateWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` to create. Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, + "DeleteWorkerPoolOperationMetadata": { + "description": "Metadata for the `DeleteWorkerPool` operation.", + "id": "DeleteWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` being deleted.
Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", @@ -1889,6 +2121,49 @@ }, "type": "object" }, + "ListWorkerPoolsResponse": { + "description": "Response containing existing `WorkerPools`.", + "id": "ListWorkerPoolsResponse", + "properties": { + "nextPageToken": { + "description": "Continuation token used to page through large result sets. Provide this value in a subsequent ListWorkerPoolsRequest to return the next page of results.", + "type": "string" + }, + "workerPools": { + "description": "`WorkerPools` for the specified project.", + "items": { + "$ref": "WorkerPool" + }, + "type": "array" + } + }, + "type": "object" + }, + "NetworkConfig": { + "description": "Defines the network configuration for the pool.", + "id": "NetworkConfig", + "properties": { + "egressOption": { + "description": "Option to configure network egress for the workers.", + "enum": [ + "EGRESS_OPTION_UNSPECIFIED", + "NO_PUBLIC_EGRESS", + "PUBLIC_EGRESS" + ], + "enumDescriptions": [ + "If unset, defaults to PUBLIC_EGRESS.", + "If set, workers are created without any public address, which prevents network egress to public IPs unless a network proxy is configured.", + "If set, workers are created with a public address which allows for public internet egress." + ], + "type": "string" + }, + "peeredNetwork": { + "description": "Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)", + "type": "string" + } + }, + "type": "object" + }, "Notification": { "description": "Notification is the container which holds the data that is relevant to this particular notification.", "id": "Notification", @@ -2037,6 +2312,32 @@ }, "type": "object" }, + "PoolOption": { + "description": "Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information.", + "id": "PoolOption", + "properties": { + "name": { + "description": "The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId}", + "type": "string" + } + }, + "type": "object" + }, + "PrivatePoolV1Config": { + "description": "Configuration for a V1 `PrivatePool`.", + "id": "PrivatePoolV1Config", + "properties": { + "networkConfig": { + "$ref": "NetworkConfig", + "description": "Network configuration for the pool."
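`ListWorkerPoolsResponse` above pairs with the `pageSize`/`pageToken` parameters on the `list` method. A minimal pagination sketch, assuming the same placeholder project and location as before:

```python
# Sketch: draining all pages of ListWorkerPoolsResponse by threading
# nextPageToken back in as pageToken. The parent value is an assumption.
from googleapiclient.discovery import build

cloudbuild = build("cloudbuild", "v1")
parent = "projects/my-project/locations/us-central1"

pools, page_token = [], None
while True:
    resp = (
        cloudbuild.projects()
        .locations()
        .workerPools()
        .list(parent=parent, pageSize=50, pageToken=page_token)
        .execute()
    )
    pools.extend(resp.get("workerPools", []))
    page_token = resp.get("nextPageToken")
    if not page_token:  # an absent token means this was the last page
        break
print(f"fetched {len(pools)} worker pools")
```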
+ }, + "workerConfig": { + "$ref": "WorkerConfig", + "description": "Machine configuration for the workers in the pool." + } + }, + "type": "object" + }, "PubsubConfig": { "description": "PubsubConfig describes the configuration of a trigger that creates a build whenever a Pub/Sub message is published.", "id": "PubsubConfig", @@ -2365,7 +2666,7 @@ }, "storageSourceManifest": { "$ref": "StorageSourceManifest", - "description": "If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview." + "description": "If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher)." } }, "type": "object" @@ -2445,7 +2746,7 @@ "type": "object" }, "StorageSourceManifest": { - "description": "Location of the source manifest in Google Cloud Storage. This feature is in Preview.", + "description": "Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher).", "id": "StorageSourceManifest", "properties": { "bucket": { @@ -2481,6 +2782,27 @@ }, "type": "object" }, + "UpdateWorkerPoolOperationMetadata": { + "description": "Metadata for the `UpdateWorkerPool` operation.", + "id": "UpdateWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` being updated. Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, "Volume": { "description": "Volume describes a Docker container volume which is mounted into build steps in order to persist files across build step execution.", "id": "Volume", @@ -2547,6 +2869,96 @@ } }, "type": "object" + }, + "WorkerConfig": { + "description": "Defines the configuration to be used for creating workers in the pool.", + "id": "WorkerConfig", + "properties": { + "diskSizeGb": { + "description": "Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size.", + "format": "int64", + "type": "string" + }, + "machineType": { + "description": "Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use a sensible default.", + "type": "string" + } + }, + "type": "object" + }, + "WorkerPool": { + "description": "Configuration for a `WorkerPool`. Cloud Build owns and maintains a pool of workers for general use and have no access to a project's private network. By default, builds submitted to Cloud Build will use a worker from this pool. If your build needs access to resources on a private network, create and use a `WorkerPool` to run your builds. Private `WorkerPool`s give your builds access to any single VPC network that you administer, including any on-prem resources connected to that VPC network. 
For an overview of custom worker pools, see [Custom workers overview](https://cloud.google.com/cloud-build/docs/custom-workers/custom-workers-overview).", + "id": "WorkerPool", + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations.", + "type": "object" + }, + "createTime": { + "description": "Output only. Time at which the request to create the `WorkerPool` was received.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "deleteTime": { + "description": "Output only. Time at which the request to delete the `WorkerPool` was received.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "displayName": { + "description": "A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters.", + "type": "string" + }, + "etag": { + "description": "Output only. Checksum computed by the server. May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding.", + "readOnly": true, + "type": "string" + }, + "name": { + "description": "Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed.", + "readOnly": true, + "type": "string" + }, + "privatePoolV1Config": { + "$ref": "PrivatePoolV1Config", + "description": "Private Pool using a v1 configuration." + }, + "state": { + "description": "Output only. `WorkerPool` state.", + "enum": [ + "STATE_UNSPECIFIED", + "CREATING", + "RUNNING", + "DELETING", + "DELETED" + ], + "enumDescriptions": [ + "State of the `WorkerPool` is unknown.", + "`WorkerPool` is being created.", + "`WorkerPool` is running.", + "`WorkerPool` is being deleted: cancelling builds and draining workers.", + "`WorkerPool` is deleted." + ], + "readOnly": true, + "type": "string" + }, + "uid": { + "description": "Output only. A unique identifier for the `WorkerPool`.", + "readOnly": true, + "type": "string" + }, + "updateTime": { + "description": "Output only. Time at which the request to update the `WorkerPool` was received.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v1alpha1.json b/googleapiclient/discovery_cache/documents/cloudbuild.v1alpha1.json index 29f786d01a9..0bf0bf08e73 100644 --- a/googleapiclient/discovery_cache/documents/cloudbuild.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/cloudbuild.v1alpha1.json @@ -306,7 +306,7 @@ } } }, - "revision": "20210613", + "revision": "20210701", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "ArtifactObjects": { @@ -628,6 +628,10 @@ ], "type": "string" }, + "pool": { + "$ref": "PoolOption", + "description": "Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information." + }, "requestedVerifyOption": { "description": "Requested verifiability options.", "enum": [ @@ -684,7 +688,7 @@ "type": "array" }, "workerPool": { - "description": "Option to specify a `WorkerPool` for the build. 
Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users.", + "description": "This field is deprecated; please use `pool.name` instead.", "type": "string" } }, @@ -816,6 +820,48 @@ "properties": {}, "type": "object" }, + "CreateWorkerPoolOperationMetadata": { + "description": "Metadata for the `CreateWorkerPool` operation.", + "id": "CreateWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` to create. Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, + "DeleteWorkerPoolOperationMetadata": { + "description": "Metadata for the `DeleteWorkerPool` operation.", + "id": "DeleteWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` being deleted. Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", @@ -1073,6 +1119,17 @@ }, "type": "object" }, + "PoolOption": { + "description": "Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information.", + "id": "PoolOption", + "properties": { + "name": { + "description": "The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId}", + "type": "string" + } + }, + "type": "object" + }, "RepoSource": { "description": "Location of the source in a Google Cloud Source Repository.", "id": "RepoSource", @@ -1271,7 +1328,7 @@ }, "storageSourceManifest": { "$ref": "StorageSourceManifest", - "description": "If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview." + "description": "If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher)." } }, "type": "object" @@ -1351,7 +1408,7 @@ "type": "object" }, "StorageSourceManifest": { - "description": "Location of the source manifest in Google Cloud Storage.
This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher).", "id": "StorageSourceManifest", "properties": { "bucket": { @@ -1387,6 +1444,27 @@ }, "type": "object" }, + "UpdateWorkerPoolOperationMetadata": { + "description": "Metadata for the `UpdateWorkerPool` operation.", + "id": "UpdateWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` being updated. Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, "Volume": { "description": "Volume describes a Docker container volume which is mounted into build steps in order to persist files across build step execution.", "id": "Volume", diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v1alpha2.json b/googleapiclient/discovery_cache/documents/cloudbuild.v1alpha2.json index deb93e540bd..9e0df1a82a2 100644 --- a/googleapiclient/discovery_cache/documents/cloudbuild.v1alpha2.json +++ b/googleapiclient/discovery_cache/documents/cloudbuild.v1alpha2.json @@ -317,7 +317,7 @@ } } }, - "revision": "20210613", + "revision": "20210701", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "ArtifactObjects": { @@ -639,6 +639,10 @@ ], "type": "string" }, + "pool": { + "$ref": "PoolOption", + "description": "Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information." + }, "requestedVerifyOption": { "description": "Requested verifiability options.", "enum": [ @@ -695,7 +699,7 @@ "type": "array" }, "workerPool": { - "description": "Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users.", + "description": "This field is deprecated; please use `pool.name` instead.", "type": "string" } }, @@ -827,6 +831,48 @@ "properties": {}, "type": "object" }, + "CreateWorkerPoolOperationMetadata": { + "description": "Metadata for the `CreateWorkerPool` operation.", + "id": "CreateWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` to create. Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, + "DeleteWorkerPoolOperationMetadata": { + "description": "Metadata for the `DeleteWorkerPool` operation.", + "id": "DeleteWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` being deleted.
Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", @@ -1076,6 +1122,17 @@ }, "type": "object" }, + "PoolOption": { + "description": "Details about how a build should be executed on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information.", + "id": "PoolOption", + "properties": { + "name": { + "description": "The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId}", + "type": "string" + } + }, + "type": "object" + }, "RepoSource": { "description": "Location of the source in a Google Cloud Source Repository.", "id": "RepoSource", @@ -1274,7 +1331,7 @@ }, "storageSourceManifest": { "$ref": "StorageSourceManifest", - "description": "If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview." + "description": "If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher)." } }, "type": "object" @@ -1354,7 +1411,7 @@ "type": "object" }, "StorageSourceManifest": { - "description": "Location of the source manifest in Google Cloud Storage. This feature is in Preview.", + "description": "Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher).", "id": "StorageSourceManifest", "properties": { "bucket": { @@ -1390,6 +1447,27 @@ }, "type": "object" }, + "UpdateWorkerPoolOperationMetadata": { + "description": "Metadata for the `UpdateWorkerPool` operation.", + "id": "UpdateWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` being updated. 
Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, "Volume": { "description": "Volume describes a Docker container volume which is mounted into build steps in order to persist files across build step execution.", "id": "Volume", diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudbuild.v1beta1.json index 55283f90ee5..af4c301e94f 100644 --- a/googleapiclient/discovery_cache/documents/cloudbuild.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudbuild.v1beta1.json @@ -317,7 +317,7 @@ } } }, - "revision": "20210613", + "revision": "20210701", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "ArtifactObjects": { @@ -639,6 +639,10 @@ ], "type": "string" }, + "pool": { + "$ref": "PoolOption", + "description": "Optional. Specification for execution on a `WorkerPool`. See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information." + }, "requestedVerifyOption": { "description": "Requested verifiability options.", "enum": [ @@ -695,7 +699,7 @@ "type": "array" }, "workerPool": { - "description": "Option to specify a `WorkerPool` for the build. Format: projects/{project}/locations/{location}/workerPools/{workerPool} This field is in beta and is available only to restricted users.", + "description": "This field is deprecated; please use `pool.name` instead.", "type": "string" } }, @@ -827,6 +831,48 @@ "properties": {}, "type": "object" }, + "CreateWorkerPoolOperationMetadata": { + "description": "Metadata for the `CreateWorkerPool` operation.", + "id": "CreateWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` to create. Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, + "DeleteWorkerPoolOperationMetadata": { + "description": "Metadata for the `DeleteWorkerPool` operation.", + "id": "DeleteWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` being deleted. Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", @@ -1076,6 +1122,17 @@ }, "type": "object" }, + "PoolOption": { + "description": "Details about how a build should be executed on a `WorkerPool`.
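This same `workerPool`-to-`pool.name` deprecation is applied across the v1, v1alpha1, v1alpha2, and v1beta1 documents, so callers migrate by moving the resource name one level down in `BuildOptions`. A before/after sketch with an assumed pool name:

```python
# Sketch: migrating BuildOptions off the deprecated flat workerPool string.
pool_name = "projects/my-project/locations/us-central1/workerPools/my-pool"

# Old shape (deprecated): a flat string on options.
old_build = {"options": {"workerPool": pool_name}}

# New shape: a nested PoolOption; the resource name moves to pool.name.
new_build = {"options": {"pool": {"name": pool_name}}}
```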
See [running builds in a custom worker pool](https://cloud.google.com/build/docs/custom-workers/run-builds-in-custom-worker-pool) for more information.", + "id": "PoolOption", + "properties": { + "name": { + "description": "The `WorkerPool` resource to execute the build on. You must have `cloudbuild.workerpools.use` on the project hosting the WorkerPool. Format projects/{project}/locations/{location}/workerPools/{workerPoolId}", + "type": "string" + } + }, + "type": "object" + }, "RepoSource": { "description": "Location of the source in a Google Cloud Source Repository.", "id": "RepoSource", @@ -1274,7 +1331,7 @@ }, "storageSourceManifest": { "$ref": "StorageSourceManifest", - "description": "If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview." + "description": "If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher)." } }, "type": "object" @@ -1354,7 +1411,7 @@ "type": "object" }, "StorageSourceManifest": { - "description": "Location of the source manifest in Google Cloud Storage. This feature is in Preview.", + "description": "Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher).", "id": "StorageSourceManifest", "properties": { "bucket": { @@ -1390,6 +1447,27 @@ }, "type": "object" }, + "UpdateWorkerPoolOperationMetadata": { + "description": "Metadata for the `UpdateWorkerPool` operation.", + "id": "UpdateWorkerPoolOperationMetadata", + "properties": { + "completeTime": { + "description": "Time the operation was completed.", + "format": "google-datetime", + "type": "string" + }, + "createTime": { + "description": "Time the operation was created.", + "format": "google-datetime", + "type": "string" + }, + "workerPool": { + "description": "The resource name of the `WorkerPool` being updated. Format: `projects/{project}/locations/{location}/workerPools/{worker_pool}`.", + "type": "string" + } + }, + "type": "object" + }, "Volume": { "description": "Volume describes a Docker container volume which is mounted into build steps in order to persist files across build step execution.", "id": "Volume", diff --git a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json index c1731b38e89..23de307dcc4 100644 --- a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json @@ -1533,7 +1533,7 @@ } } }, - "revision": "20210625", + "revision": "20210629", "rootUrl": "https://cloudchannel.googleapis.com/", "schemas": { "GoogleCloudChannelV1ActivateEntitlementRequest": { @@ -1856,15 +1856,15 @@ "type": "string" }, "email": { - "description": "The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts.", + "description": "The customer account's contact email. Required for entitlements that create admin.google.com accounts, and serves as the customer's username for those accounts. Use this email to invite Team customers.", "type": "string" }, "firstName": { - "description": "The customer account contact's first name.", + "description": "The customer account contact's first name. 
Optional for Team customers.", "type": "string" }, "lastName": { - "description": "The customer account contact's last name.", + "description": "The customer account contact's last name. Optional for Team customers.", "type": "string" }, "phone": { @@ -1898,7 +1898,7 @@ "id": "GoogleCloudChannelV1Customer", "properties": { "alternateEmail": { - "description": "Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses.", + "description": "Secondary contact email. You need to provide an alternate email to create different domains if a primary contact email already exists. Users will receive a notification with credentials when you create an admin.google.com account. Secondary emails are also recovery email addresses. Alternate emails are optional when you create Team customers.", "type": "string" }, "channelPartnerId": { diff --git a/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json b/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json index bd89208875a..b28d1d52e73 100644 --- a/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json @@ -430,7 +430,7 @@ } } }, - "revision": "20210616", + "revision": "20210624", "rootUrl": "https://clouderrorreporting.googleapis.com/", "schemas": { "DeleteEventsResponse": { diff --git a/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json index 2527ac3870c..6fd861eef8c 100644 --- a/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json @@ -546,7 +546,7 @@ } } }, - "revision": "20210621", + "revision": "20210624", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1.json index 185fb8b0760..7c92ec0f18d 100644 --- a/googleapiclient/discovery_cache/documents/cloudidentity.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1.json @@ -851,7 +851,7 @@ "type": "string" }, "updateMask": { - "description": "Required. The fully-qualified names of fields to update. May only contain the following fields: `display_name`, `description`, `labels`.", + "description": "Required. The names of fields to update. May only contain the following fields: `display_name`, `description`, `labels`.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1273,7 +1273,7 @@ } } }, - "revision": "20210623", + "revision": "20210629", "rootUrl": "https://cloudidentity.googleapis.com/", "schemas": { "CheckTransitiveMembershipResponse": { diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json index 630912fc275..a55e4fcfbf4 100644 --- a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json @@ -918,7 +918,7 @@ "type": "string" }, "updateMask": { - "description": "Required. The fully-qualified names of fields to update. 
May only contain the following fields: `display_name`, `description`, `labels`.", + "description": "Required. The names of fields to update. May only contain the following fields: `display_name`, `description`, `labels`.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -1336,7 +1336,7 @@ } } }, - "revision": "20210623", + "revision": "20210629", "rootUrl": "https://cloudidentity.googleapis.com/", "schemas": { "AndroidAttributes": { diff --git a/googleapiclient/discovery_cache/documents/cloudiot.v1.json b/googleapiclient/discovery_cache/documents/cloudiot.v1.json index b09fc9e9890..6ad7a1f2fc4 100644 --- a/googleapiclient/discovery_cache/documents/cloudiot.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudiot.v1.json @@ -938,7 +938,7 @@ } } }, - "revision": "20210615", + "revision": "20210622", "rootUrl": "https://cloudiot.googleapis.com/", "schemas": { "BindDeviceToGatewayRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudkms.v1.json b/googleapiclient/discovery_cache/documents/cloudkms.v1.json index 53c4b30decd..6b289e55664 100644 --- a/googleapiclient/discovery_cache/documents/cloudkms.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudkms.v1.json @@ -1259,7 +1259,7 @@ } } }, - "revision": "20210622", + "revision": "20210625", "rootUrl": "https://cloudkms.googleapis.com/", "schemas": { "AsymmetricDecryptRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json index af519be75e0..112b43ff893 100644 --- a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json @@ -216,7 +216,7 @@ } } }, - "revision": "20210618", + "revision": "20210626", "rootUrl": "https://cloudprofiler.googleapis.com/", "schemas": { "CreateProfileRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json index d190749f049..f54b9a3e5fd 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json @@ -1171,7 +1171,7 @@ } } }, - "revision": "20210613", + "revision": "20210621", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json index 15279091b60..d989748b00b 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json @@ -566,7 +566,7 @@ } } }, - "revision": "20210613", + "revision": "20210621", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json index 6ce5d7a0394..eba52f5db22 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json @@ -450,7 +450,7 @@ } } }, - "revision": "20210613", + "revision": "20210621", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json 
b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json index 4ba9388f6ae..48159ca1f18 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json @@ -450,7 +450,7 @@ } } }, - "revision": "20210613", + "revision": "20210621", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json index 3751c9de9a1..01ff3f263f2 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json @@ -1612,7 +1612,7 @@ } } }, - "revision": "20210613", + "revision": "20210621", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudsearch.v1.json b/googleapiclient/discovery_cache/documents/cloudsearch.v1.json index 8c88e26f8bd..78f58eb722e 100644 --- a/googleapiclient/discovery_cache/documents/cloudsearch.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudsearch.v1.json @@ -1916,7 +1916,7 @@ } } }, - "revision": "20210614", + "revision": "20210622", "rootUrl": "https://cloudsearch.googleapis.com/", "schemas": { "AuditLoggingSettings": { diff --git a/googleapiclient/discovery_cache/documents/cloudshell.v1.json b/googleapiclient/discovery_cache/documents/cloudshell.v1.json index 88492e477b3..f788aa9b387 100644 --- a/googleapiclient/discovery_cache/documents/cloudshell.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudshell.v1.json @@ -374,7 +374,7 @@ } } }, - "revision": "20210618", + "revision": "20210628", "rootUrl": "https://cloudshell.googleapis.com/", "schemas": { "AddPublicKeyMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v1.json b/googleapiclient/discovery_cache/documents/cloudtrace.v1.json index 5a2ecaf7d1e..01563d0c103 100644 --- a/googleapiclient/discovery_cache/documents/cloudtrace.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudtrace.v1.json @@ -257,7 +257,7 @@ } } }, - "revision": "20210604", + "revision": "20210625", "rootUrl": "https://cloudtrace.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v2.json b/googleapiclient/discovery_cache/documents/cloudtrace.v2.json index 8b7b1b88a6e..9ae93dc1854 100644 --- a/googleapiclient/discovery_cache/documents/cloudtrace.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudtrace.v2.json @@ -181,7 +181,7 @@ } } }, - "revision": "20210604", + "revision": "20210625", "rootUrl": "https://cloudtrace.googleapis.com/", "schemas": { "Annotation": { diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json b/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json index d0987cd953c..b2aec4f4ec5 100644 --- a/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json @@ -273,7 +273,7 @@ } } }, - "revision": "20210604", + "revision": "20210625", "rootUrl": "https://cloudtrace.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/composer.v1.json b/googleapiclient/discovery_cache/documents/composer.v1.json index 5692ec14204..8a1e9a08ff6 100644 --- 
a/googleapiclient/discovery_cache/documents/composer.v1.json +++ b/googleapiclient/discovery_cache/documents/composer.v1.json @@ -406,7 +406,7 @@ } } }, - "revision": "20210614", + "revision": "20210625", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AllowedIpRange": { @@ -448,10 +448,21 @@ "readOnly": true, "type": "string" }, + "imageVersion": { + "description": "Composer image for which the build was happening.", + "type": "string" + }, "pypiConflictBuildLogExtract": { "description": "Output only. Extract from a docker image build log containing information about pypi modules conflicts.", "readOnly": true, "type": "string" + }, + "pypiDependencies": { + "additionalProperties": { + "type": "string" + }, + "description": "Pypi dependencies specified in the environment configuration, at the time when the build was triggered.", + "type": "object" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/composer.v1beta1.json b/googleapiclient/discovery_cache/documents/composer.v1beta1.json index 370c117cc56..49abe1621a6 100644 --- a/googleapiclient/discovery_cache/documents/composer.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/composer.v1beta1.json @@ -270,7 +270,7 @@ "type": "string" }, "updateMask": { - "description": "Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of argparse, the `updateMask` parameter would include the following two `paths` values: \"config.softwareConfig.pypiPackages.scikit-learn\" and \"config.softwareConfig.pypiPackages.argparse\". The included patch environment would specify the scikit-learn version as follows: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"scikit-learn\":\"==0.19.0\" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and argparse will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels \"label1\" and \"label2\" while clearing \"label3\" (assuming it already exists), one can provide the paths \"labels.label1\", \"labels.label2\", and \"labels.label3\" and populate the patch environment as follows: { \"labels\":{ \"label1\":\"new-label1-value\" \"label2\":\"new-label2-value\" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path \"config.softwareConfig.pypiPackages\", and the patch environment would be the following: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"botocore\":\"==1.7.14\" } } } } *Note:* Only the following fields can be updated: * config.softwareConfig.pypiPackages * Replace all custom custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. 
* config.softwareConfig.pypiPackages.packagename * Update the custom PyPI package packagename, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the \"config.softwareConfig.pypiPackages\" mask. * labels * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * labels.labelName * Set the label named labelName, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the \"labels\" mask. * config.nodeCount * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. * config.webServerNetworkAccessControl * Replace the environment's current WebServerNetworkAccessControl. * config.softwareConfig.airflowConfigOverrides * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * config.softwareConfig.airflowConfigOverrides.section- name * Override the Apache Airflow config property name in the section named section, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the \"config.softwareConfig.airflowConfigOverrides\" mask. * config.softwareConfig.envVariables * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * config.softwareConfig.imageVersion * Upgrade the version of the environment in-place. Refer to `SoftwareConfig.image_version` for information on how to format the new image version. Additionally, the new image version cannot effect a version downgrade and must match the current image version's Composer major version and Airflow major and minor versions. Consult the Cloud Composer Version List for valid values. * config.databaseConfig.machineType * Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. * config.webServerConfig.machineType * Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. * config.maintenanceWindow * Maintenance window during which Cloud Composer components may be under maintenance.", + "description": "Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of argparse, the `updateMask` parameter would include the following two `paths` values: \"config.softwareConfig.pypiPackages.scikit-learn\" and \"config.softwareConfig.pypiPackages.argparse\". 
The included patch environment would specify the scikit-learn version as follows: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"scikit-learn\":\"==0.19.0\" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and argparse will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels \"label1\" and \"label2\" while clearing \"label3\" (assuming it already exists), one can provide the paths \"labels.label1\", \"labels.label2\", and \"labels.label3\" and populate the patch environment as follows: { \"labels\":{ \"label1\":\"new-label1-value\" \"label2\":\"new-label2-value\" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path \"config.softwareConfig.pypiPackages\", and the patch environment would be the following: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"botocore\":\"==1.7.14\" } } } } *Note:* Only the following fields can be updated: * config.softwareConfig.pypiPackages * Replace all custom custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * config.softwareConfig.pypiPackages.packagename * Update the custom PyPI package packagename, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the \"config.softwareConfig.pypiPackages\" mask. * labels * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * labels.labelName * Set the label named labelName, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the \"labels\" mask. * config.nodeCount * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. * config.webServerNetworkAccessControl * Replace the environment's current WebServerNetworkAccessControl. * config.softwareConfig.airflowConfigOverrides * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * config.softwareConfig.airflowConfigOverrides.section- name * Override the Apache Airflow config property name in the section named section, preserving other properties. 
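The scikit-learn/argparse example in this description translates directly into an `environments.patch` call. A hedged sketch with a placeholder environment name (the mask and body are taken from the worked example above):

```python
# Sketch: pin scikit-learn and remove argparse in one patch, per the
# updateMask semantics described here. The environment name is an assumption.
from googleapiclient.discovery import build

composer = build("composer", "v1beta1")
env_name = "projects/my-project/locations/us-central1/environments/my-env"

op = (
    composer.projects()
    .locations()
    .environments()
    .patch(
        name=env_name,
        # One path per map entry: update scikit-learn, delete argparse.
        updateMask=(
            "config.softwareConfig.pypiPackages.scikit-learn,"
            "config.softwareConfig.pypiPackages.argparse"
        ),
        # argparse is listed in the mask but omitted from the body,
        # which removes the existing installation.
        body={
            "config": {
                "softwareConfig": {
                    "pypiPackages": {"scikit-learn": "==0.19.0"}
                }
            }
        },
    )
    .execute()
)
```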
To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the \"config.softwareConfig.airflowConfigOverrides\" mask. * config.softwareConfig.envVariables * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * config.softwareConfig.imageVersion * Upgrade the version of the environment in-place. Refer to `SoftwareConfig.image_version` for information on how to format the new image version. Additionally, the new image version cannot effect a version downgrade and must match the current image version's Composer major version and Airflow major and minor versions. Consult the Cloud Composer Version List for valid values. * config.softwareConfig.schedulerCount * Horizontally scale the number of schedulers in Airflow. A positive integer not greater than the number of nodes must be provided in the `config.softwareConfig.schedulerCount` field. * config.databaseConfig.machineType * Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. * config.webServerConfig.machineType * Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. * config.maintenanceWindow * Maintenance window during which Cloud Composer components may be under maintenance.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -462,7 +462,7 @@ } } }, - "revision": "20210614", + "revision": "20210625", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AllowedIpRange": { @@ -515,10 +515,21 @@ "readOnly": true, "type": "string" }, + "imageVersion": { + "description": "Composer image for which the build was happening.", + "type": "string" + }, "pypiConflictBuildLogExtract": { "description": "Output only. Extract from a docker image build log containing information about pypi modules conflicts.", "readOnly": true, "type": "string" + }, + "pypiDependencies": { + "additionalProperties": { + "type": "string" + }, + "description": "Pypi dependencies specified in the environment configuration, at the time when the build was triggered.", + "type": "object" } }, "type": "object" @@ -654,6 +665,22 @@ "$ref": "EncryptionConfig", "description": "Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated." }, + "environmentSize": { + "description": "Optional. The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.", + "enum": [ + "ENVIRONMENT_SIZE_UNSPECIFIED", + "ENVIRONMENT_SIZE_SMALL", + "ENVIRONMENT_SIZE_MEDIUM", + "ENVIRONMENT_SIZE_LARGE" + ], + "enumDescriptions": [ + "The size of the environment is unspecified.", + "The environment size is small.", + "The environment size is medium.", + "The environment size is large." + ], + "type": "string" + }, "gkeCluster": { "description": "Output only. The Kubernetes Engine cluster used to run this environment.", "readOnly": true, @@ -687,6 +714,10 @@ "webServerNetworkAccessControl": { "$ref": "WebServerNetworkAccessControl", "description": "Optional. 
The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied." + }, + "workloadsConfig": { + "$ref": "WorkloadsConfig", + "description": "Optional. The workloads configuration settings for the GKE cluster associated with the Cloud Composer environment. The GKE cluster runs Airflow scheduler, web server and workers workloads. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer." } }, "type": "object" @@ -865,7 +896,7 @@ "type": "array" }, "serviceAccount": { - "description": "Optional. The Google Cloud Platform Service Account to be used by the workloads. If a service account is not specified, the \"default\" Compute Engine service account is used. Cannot be updated .", + "description": "Optional. The Google Cloud Platform Service Account to be used by the workloads. If a service account is not specified, the \"default\" Compute Engine service account is used. Cannot be updated.", "type": "string" }, "subnetwork": { @@ -1002,6 +1033,15 @@ "description": "The configuration information for configuring a Private IP Cloud Composer environment.", "id": "PrivateEnvironmentConfig", "properties": { + "cloudComposerNetworkIpv4CidrBlock": { + "description": "Optional. The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.", + "type": "string" + }, + "cloudComposerNetworkIpv4ReservedRange": { + "description": "Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.", + "readOnly": true, + "type": "string" + }, "cloudSqlIpv4CidrBlock": { "description": "Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block", "type": "string" @@ -1032,6 +1072,33 @@ "properties": {}, "type": "object" }, + "SchedulerResource": { + "description": "Configuration for resources used by Airflow schedulers.", + "id": "SchedulerResource", + "properties": { + "count": { + "description": "Optional. The number of schedulers.", + "format": "int32", + "type": "integer" + }, + "cpu": { + "description": "Optional. CPU request and limit for a single Airflow scheduler replica.", + "format": "float", + "type": "number" + }, + "memoryGb": { + "description": "Optional. Memory (GB) request and limit for a single Airflow scheduler replica.", + "format": "float", + "type": "number" + }, + "storageGb": { + "description": "Optional. Storage (GB) request and limit for a single Airflow scheduler replica.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, "SoftwareConfig": { "description": "Specifies the selection and configuration of software inside the environment.", "id": "SoftwareConfig", @@ -1119,6 +1186,79 @@ } }, "type": "object" + }, + "WebServerResource": { + "description": "Configuration for resources used by Airflow web server.", + "id": "WebServerResource", + "properties": { + "cpu": { + "description": "Optional. CPU request and limit for Airflow web server.", + "format": "float", + "type": "number" + }, + "memoryGb": { + "description": "Optional. 
Memory (GB) request and limit for Airflow web server.", + "format": "float", + "type": "number" + }, + "storageGb": { + "description": "Optional. Storage (GB) request and limit for Airflow web server.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "WorkerResource": { + "description": "Configuration for resources used by Airflow workers.", + "id": "WorkerResource", + "properties": { + "cpu": { + "description": "Optional. CPU request and limit for a single Airflow worker replica.", + "format": "float", + "type": "number" + }, + "maxCount": { + "description": "Optional. Maximum number of workers for autoscaling.", + "format": "int32", + "type": "integer" + }, + "memoryGb": { + "description": "Optional. Memory (GB) request and limit for a single Airflow worker replica.", + "format": "float", + "type": "number" + }, + "minCount": { + "description": "Optional. Minimum number of workers for autoscaling.", + "format": "int32", + "type": "integer" + }, + "storageGb": { + "description": "Optional. Storage (GB) request and limit for a single Airflow worker replica.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "WorkloadsConfig": { + "description": "The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.", + "id": "WorkloadsConfig", + "properties": { + "scheduler": { + "$ref": "SchedulerResource", + "description": "Optional. Resources used by Airflow schedulers." + }, + "webServer": { + "$ref": "WebServerResource", + "description": "Optional. Resources used by Airflow web server." + }, + "worker": { + "$ref": "WorkerResource", + "description": "Optional. Resources used by Airflow workers." + } + }, + "type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/container.v1.json b/googleapiclient/discovery_cache/documents/container.v1.json index b0c09e99587..e7eb2a895d5 100644 --- a/googleapiclient/discovery_cache/documents/container.v1.json +++ b/googleapiclient/discovery_cache/documents/container.v1.json @@ -2459,7 +2459,7 @@ } } }, - "revision": "20210605", + "revision": "20210617", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -3006,6 +3006,10 @@ "$ref": "AddonsConfig", "description": "Configurations for the various addons available to run in the cluster." }, + "desiredAuthenticatorGroupsConfig": { + "$ref": "AuthenticatorGroupsConfig", + "description": "The desired authenticator groups config for the cluster." + }, "desiredAutopilot": { "$ref": "Autopilot", "description": "The desired Autopilot configuration for the cluster." diff --git a/googleapiclient/discovery_cache/documents/container.v1beta1.json b/googleapiclient/discovery_cache/documents/container.v1beta1.json index e027c7b1616..1dca3a918d1 100644 --- a/googleapiclient/discovery_cache/documents/container.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/container.v1beta1.json @@ -2484,7 +2484,7 @@ } } }, - "revision": "20210605", + "revision": "20210617", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -3127,6 +3127,10 @@ "$ref": "AddonsConfig", "description": "Configurations for the various addons available to run in the cluster." }, + "desiredAuthenticatorGroupsConfig": { + "$ref": "AuthenticatorGroupsConfig", + "description": "AuthenticatorGroupsConfig specifies the config for the cluster security groups settings." 
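The new Composer `SchedulerResource`/`WebServerResource`/`WorkerResource` messages above compose into a single `WorkloadsConfig` under `EnvironmentConfig`. A minimal sketch of such a body; every sizing value here is an illustrative assumption, not a documented default:

```python
# Sketch: a WorkloadsConfig body matching the composer.v1beta1 schemas above.
workloads_config = {
    "scheduler": {"count": 2, "cpu": 0.5, "memoryGb": 2, "storageGb": 1},
    "webServer": {"cpu": 0.5, "memoryGb": 2, "storageGb": 1},
    "worker": {
        "cpu": 0.5,
        "memoryGb": 2,
        "storageGb": 1,
        "minCount": 1,  # autoscaling floor
        "maxCount": 3,  # autoscaling ceiling
    },
}

# Attached under config; supported for composer-2.*.*-airflow-*.*.* only.
environment = {"config": {"workloadsConfig": workloads_config}}
```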
+ }, "desiredAutopilot": { "$ref": "Autopilot", "description": "The desired Autopilot configuration for the cluster." diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json index c9ed0352f07..ccd5ad232bd 100644 --- a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json @@ -1219,7 +1219,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://containeranalysis.googleapis.com/", "schemas": { "Artifact": { diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json index dd059a53f8c..80419318bb7 100644 --- a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json @@ -853,7 +853,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://containeranalysis.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/customsearch.v1.json b/googleapiclient/discovery_cache/documents/customsearch.v1.json index 88e36b1a22c..208716f6fe0 100644 --- a/googleapiclient/discovery_cache/documents/customsearch.v1.json +++ b/googleapiclient/discovery_cache/documents/customsearch.v1.json @@ -674,7 +674,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://customsearch.googleapis.com/", "schemas": { "Promotion": { diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1.json index a2f421cae08..e6262c5ed2a 100644 --- a/googleapiclient/discovery_cache/documents/datacatalog.v1.json +++ b/googleapiclient/discovery_cache/documents/datacatalog.v1.json @@ -1841,7 +1841,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://datacatalog.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json index 2b09f043cd6..01f9791c464 100644 --- a/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json @@ -1808,7 +1808,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://datacatalog.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/dataflow.v1b3.json b/googleapiclient/discovery_cache/documents/dataflow.v1b3.json index 0745d223efb..3e2cabfcfe9 100644 --- a/googleapiclient/discovery_cache/documents/dataflow.v1b3.json +++ b/googleapiclient/discovery_cache/documents/dataflow.v1b3.json @@ -2225,7 +2225,7 @@ } } }, - "revision": "20210616", + "revision": "20210618", "rootUrl": "https://dataflow.googleapis.com/", "schemas": { "ApproximateProgress": { @@ -2336,7 +2336,7 @@ "type": "string" }, "workerPool": { - "description": "A short and friendly name for the worker pool this event refers to, populated from the value of PoolStageRelation::user_pool_name.", + "description": "A short and friendly name for the worker pool this event refers to.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/datafusion.v1.json b/googleapiclient/discovery_cache/documents/datafusion.v1.json index 
ef3141917b3..90f0dde7312 100644 --- a/googleapiclient/discovery_cache/documents/datafusion.v1.json +++ b/googleapiclient/discovery_cache/documents/datafusion.v1.json @@ -161,7 +161,7 @@ "type": "string" }, "pageSize": { - "description": "The maximum number of results to return. If not set, the service will select a default.", + "description": "The maximum number of results to return. If not set, the service selects a default.", "format": "int32", "location": "query", "type": "integer" @@ -637,7 +637,7 @@ } } }, - "revision": "20210322", + "revision": "20210628", "rootUrl": "https://datafusion.googleapis.com/", "schemas": { "Accelerator": { @@ -649,12 +649,14 @@ "enum": [ "ACCELERATOR_TYPE_UNSPECIFIED", "CDC", - "HEALTHCARE" + "HEALTHCARE", + "CCAI_INSIGHTS" ], "enumDescriptions": [ "Default value, if unspecified.", "Change Data Capture accelerator for CDF.", - "Cloud Healthcare accelerator for CDF. This accelerator is to enable Cloud Healthcare specific CDF plugins developed by Healthcare team." + "Cloud Healthcare accelerator for CDF. This accelerator is to enable Cloud Healthcare-specific CDF plugins developed by the Healthcare team.", + "Contact Center AI Insights. This accelerator is used to enable import and export pipelines custom-built to streamline CCAI Insights processing." ], "type": "string" }, @@ -753,6 +755,17 @@ "properties": {}, "type": "object" }, + "CryptoKeyConfig": { + "description": "The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature.", + "id": "CryptoKeyConfig", + "properties": { + "keyReference": { + "description": "The name of the key which is used to encrypt/decrypt customer data. For a key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "type": "string" + } + }, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", @@ -811,6 +824,10 @@ "readOnly": true, "type": "string" }, + "cryptoKeyConfig": { + "$ref": "CryptoKeyConfig", + "description": "The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature." + }, "dataprocServiceAccount": { "description": "User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources.", "type": "string" @@ -844,7 +861,7 @@ "additionalProperties": { "type": "string" }, - "description": "The resource labels for instance to use to annotate any related underlying resources such as GCE VMs. The character '=' is not allowed to be used within the labels.", + "description": "The resource labels for instance to use to annotate any related underlying resources such as Compute Engine VMs. 
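The new CryptoKeyConfig schema and the Instance `cryptoKeyConfig` field above wire CMEK into Data Fusion instance creation. A hedged sketch, assuming a pre-provisioned Cloud KMS key and the v1 `instances.create` method (all names are placeholders):

```python
# Sketch only: create a Data Fusion instance whose customer data is
# encrypted with a customer-managed key, referenced through the new
# cryptoKeyConfig field. Project, location, instance, and key names
# are placeholders.
from googleapiclient.discovery import build

datafusion = build("datafusion", "v1")

operation = datafusion.projects().locations().instances().create(
    parent="projects/my-project/locations/us-central1",
    instanceId="my-instance",
    body={
        "type": "BASIC",
        "cryptoKeyConfig": {
            "keyReference": (
                "projects/my-project/locations/us-central1/"
                "keyRings/my-ring/cryptoKeys/my-key"
            ),
        },
    },
).execute()
```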
The character '=' is not allowed to be used within the labels.", "type": "object" }, "name": { @@ -1125,7 +1142,7 @@ "additionalProperties": { "type": "string" }, - "description": "Map to hold any additonal status info for the operation If there is an accelerator being enabled/disabled/deleted, this will be populated with accelerator name as key and status as ENABLING, DISABLING or DELETING", + "description": "Map to hold any additional status info for the operation If there is an accelerator being enabled/disabled/deleted, this will be populated with accelerator name as key and status as ENABLING, DISABLING or DELETING", "type": "object" }, "apiVersion": { diff --git a/googleapiclient/discovery_cache/documents/datafusion.v1beta1.json b/googleapiclient/discovery_cache/documents/datafusion.v1beta1.json index b1aee6aa408..743f3c5ec83 100644 --- a/googleapiclient/discovery_cache/documents/datafusion.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datafusion.v1beta1.json @@ -161,7 +161,7 @@ "type": "string" }, "pageSize": { - "description": "The maximum number of results to return. If not set, the service will select a default.", + "description": "The maximum number of results to return. If not set, the service selects a default.", "format": "int32", "location": "query", "type": "integer" @@ -520,6 +520,102 @@ } }, "resources": { + "dnsPeerings": { + "methods": { + "add": { + "description": "Add DNS peering on the given resource.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}/dnsPeerings:add", + "httpMethod": "POST", + "id": "datafusion.projects.locations.instances.dnsPeerings.add", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The resource on which DNS peering will be created.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta1/{+parent}/dnsPeerings:add", + "request": { + "$ref": "AddDnsPeeringRequest" + }, + "response": { + "$ref": "AddDnsPeeringResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "List DNS peering for a given resource.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}/dnsPeerings:list", + "httpMethod": "GET", + "id": "datafusion.projects.locations.instances.dnsPeerings.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value to use if there are additional results to retrieve for this list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The resource on which DNS peering will be listed.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta1/{+parent}/dnsPeerings:list", + "response": { + "$ref": "ListDnsPeeringsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "remove": { + "description": "Remove DNS peering on the given resource.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}/dnsPeerings:remove", + "httpMethod": "POST", + "id": "datafusion.projects.locations.instances.dnsPeerings.remove", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The resource on which DNS peering will be removed.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1beta1/{+parent}/dnsPeerings:remove", + "request": { + "$ref": "RemoveDnsPeeringRequest" + }, + "response": { + "$ref": "RemoveDnsPeeringResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, "namespaces": { "methods": { "getIamPolicy": { @@ -553,6 +649,57 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "list": { + "description": "List namespaces in a given instance.", + "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}/namespaces", + "httpMethod": "GET", + "id": "datafusion.projects.locations.instances.namespaces.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value to use if there are additional results to retrieve for this list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The instance whose namespaces should be listed.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", + "required": true, + "type": "string" + }, + "view": { + "description": "By default, only basic information about a namespace is returned (e.g. name). When `NAMESPACE_VIEW_FULL` is specified, additional information associated with a namespace gets returned (e.g. IAM policy set on the namespace).", + "enum": [ + "NAMESPACE_VIEW_UNSPECIFIED", + "NAMESPACE_VIEW_BASIC", + "NAMESPACE_VIEW_FULL" + ], + "enumDescriptions": [ + "Default/unset value, which will use BASIC view.", + "Shows the most basic metadata of a namespace.", + "Returns all metadata of a namespace." + ], + "location": "query", + "type": "string" + } + }, + "path": "v1beta1/{+parent}/namespaces", + "response": { + "$ref": "ListNamespacesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}/namespaces/{namespacesId}:setIamPolicy", @@ -786,7 +933,7 @@ } } }, - "revision": "20210322", + "revision": "20210628", "rootUrl": "https://datafusion.googleapis.com/", "schemas": { "Accelerator": { @@ -810,6 +957,23 @@ }, "type": "object" }, + "AddDnsPeeringRequest": { + "description": "Request message to create DNS peering.", + "id": "AddDnsPeeringRequest", + "properties": { + "dnsPeering": { + "$ref": "DnsPeering", + "description": "DNS peering config." + } + }, + "type": "object" + }, + "AddDnsPeeringResponse": { + "description": "Response message for the add DNS peering method.", + "id": "AddDnsPeeringResponse", + "properties": {}, + "type": "object" + }, "AuditConfig": { "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", @@ -886,6 +1050,44 @@ "properties": {}, "type": "object" }, + "CryptoKeyConfig": { + "description": "The crypto key configuration. This field is used by the Customer-managed encryption keys (CMEK) feature.", + "id": "CryptoKeyConfig", + "properties": { + "keyReference": { + "description": "The name of the key which is used to encrypt/decrypt customer data. For a key in Cloud KMS, the key should be in the format of `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "type": "string" + } + }, + "type": "object" + }, + "DnsPeering": { + "description": "DNS peering configuration. These configurations are used to create DNS peering with the customer's Cloud DNS.", + "id": "DnsPeering", + "properties": { + "description": { + "description": "Optional. Description of the DNS zone.", + "type": "string" + }, + "domain": { + "description": "Required. Name of the DNS domain.", + "type": "string" + }, + "targetNetwork": { + "description": "Optional. Target network to which DNS peering should happen.", + "type": "string" + }, + "targetProject": { + "description": "Optional. Target project to which DNS peering should happen.", + "type": "string" + }, + "zone": { + "description": "Required. Name of the zone.", + "type": "string" + } + }, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
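Combining the new `dnsPeerings:add` method with the DnsPeering schema above, a minimal sketch (instance, zone, and domain names are placeholders):

```python
# Sketch only: attach a DNS peering config to a Data Fusion instance via
# the new v1beta1 dnsPeerings:add method. The body follows the
# AddDnsPeeringRequest/DnsPeering schemas above; all names are placeholders.
from googleapiclient.discovery import build

datafusion = build("datafusion", "v1beta1")

instance = "projects/my-project/locations/us-central1/instances/my-instance"
response = datafusion.projects().locations().instances().dnsPeerings().add(
    parent=instance,
    body={
        "dnsPeering": {
            "zone": "my-private-zone",
            "domain": "internal.example.",
            "description": "Peering to the corp private zone",
        }
    },
).execute()
```

A peering can later be dropped with `dnsPeerings().remove(parent=instance, body={"zone": "my-private-zone"})`, matching the RemoveDnsPeeringRequest schema below.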
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", @@ -915,6 +1117,21 @@ }, "type": "object" }, + "IAMPolicy": { + "description": "IAMPolicy encapsulates the IAM policy name, definition and status of policy fetching.", + "id": "IAMPolicy", + "properties": { + "policy": { + "$ref": "Policy", + "description": "Policy definition if IAM policy fetching is successful, otherwise empty." + }, + "status": { + "$ref": "Status", + "description": "Status of IAM policy fetching." + } + }, + "type": "object" + }, "Instance": { "description": "Represents a Data Fusion instance.", "id": "Instance", @@ -944,6 +1161,10 @@ "readOnly": true, "type": "string" }, + "cryptoKeyConfig": { + "$ref": "CryptoKeyConfig", + "description": "The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature." + }, "dataprocServiceAccount": { "description": "User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. This allows users to have fine-grained access control on Dataproc's accesses to cloud resources.", "type": "string" @@ -1105,6 +1326,24 @@ }, "type": "object" }, + "ListDnsPeeringsResponse": { + "description": "List DNS peering response.", + "id": "ListDnsPeeringsResponse", + "properties": { + "dnsPeerings": { + "description": "List of DNS peering configs.", + "items": { + "$ref": "DnsPeering" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results or empty if there are no more results in the list.", + "type": "string" + } + }, + "type": "object" + }, "ListInstancesResponse": { "description": "Response message for the list instance request.", "id": "ListInstancesResponse", @@ -1148,6 +1387,24 @@ }, "type": "object" }, + "ListNamespacesResponse": { + "description": "List namespaces response.", + "id": "ListNamespacesResponse", + "properties": { + "namespaces": { + "description": "List of namespaces.", + "items": { + "$ref": "Namespace" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results or empty if there are no more results in the list.", + "type": "string" + } + }, + "type": "object" + }, "ListOperationsResponse": { "description": "The response message for Operations.ListOperations.", "id": "ListOperationsResponse", @@ -1200,6 +1457,21 @@ }, "type": "object" }, + "Namespace": { + "description": "Represents the information of a namespace.", + "id": "Namespace", + "properties": { + "iamPolicy": { + "$ref": "IAMPolicy", + "description": "IAM policy associated with this namespace." + }, + "name": { + "description": "Name of the given namespace.", + "type": "string" + } + }, + "type": "object" + }, "NetworkConfig": { "description": "Network configuration for a Data Fusion instance. These configurations are used for peering with the customer network. Configurations are optional when a public Data Fusion instance is to be created. 
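The namespaces surface pairs the new `namespaces.list` method with the ListNamespacesResponse, Namespace, and IAMPolicy schemas above. A sketch that pages through all namespaces with `NAMESPACE_VIEW_FULL` so each entry carries its IAM policy (the instance name is a placeholder):

```python
# Sketch only: list every namespace of a Data Fusion instance, requesting
# the FULL view so each Namespace includes its IAMPolicy. Names are
# placeholders; list_next() is the standard pagination helper generated
# for paged discovery methods.
from googleapiclient.discovery import build

datafusion = build("datafusion", "v1beta1")
namespaces = datafusion.projects().locations().instances().namespaces()

request = namespaces.list(
    parent="projects/my-project/locations/us-central1/instances/my-instance",
    view="NAMESPACE_VIEW_FULL",
    pageSize=50,
)
while request is not None:
    response = request.execute()
    for ns in response.get("namespaces", []):
        print(ns["name"], ns.get("iamPolicy", {}).get("status"))
    request = namespaces.list_next(previous_request=request,
                                   previous_response=response)
```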
However, providing these configurations allows several benefits, such as reduced network latency while accessing the customer resources from managed Data Fusion instance nodes, as well as access to the customer on-prem resources.", "id": "NetworkConfig", @@ -1318,6 +1590,23 @@ }, "type": "object" }, + "RemoveDnsPeeringRequest": { + "description": "Request message to remove DNS peering.", + "id": "RemoveDnsPeeringRequest", + "properties": { + "zone": { + "description": "Required. The zone to be removed.", + "type": "string" + } + }, + "type": "object" + }, + "RemoveDnsPeeringResponse": { + "description": "Response message for the remove DNS peering method.", + "id": "RemoveDnsPeeringResponse", + "properties": {}, + "type": "object" + }, "RemoveIamPolicyRequest": { "description": "Request message for RemoveIamPolicy method.", "id": "RemoveIamPolicyRequest", diff --git a/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json b/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json index b3ed187da15..07d7c8541d6 100644 --- a/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json @@ -1596,7 +1596,7 @@ } } }, - "revision": "20210618", + "revision": "20210629", "rootUrl": "https://datalabeling.googleapis.com/", "schemas": { "GoogleCloudDatalabelingV1alpha1CreateInstructionMetadata": { diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1.json b/googleapiclient/discovery_cache/documents/datamigration.v1.json index f4489dc98d4..4919eb5fd24 100644 --- a/googleapiclient/discovery_cache/documents/datamigration.v1.json +++ b/googleapiclient/discovery_cache/documents/datamigration.v1.json @@ -1049,7 +1049,7 @@ } } }, - "revision": "20210616", + "revision": "20210624", "rootUrl": "https://datamigration.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json index 6ee95c2f2c7..97e18bbf561 100644 --- a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json @@ -1049,7 +1049,7 @@ } } }, - "revision": "20210616", + "revision": "20210624", "rootUrl": "https://datamigration.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/dataproc.v1.json b/googleapiclient/discovery_cache/documents/dataproc.v1.json index 2ea4600dbaf..a42ab2ae65f 100644 --- a/googleapiclient/discovery_cache/documents/dataproc.v1.json +++ b/googleapiclient/discovery_cache/documents/dataproc.v1.json @@ -901,7 +901,7 @@ "type": "string" }, "requestId": { - "description": "Optional. A unique id used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "description": "Optional. A unique ID used to identify the request. 
If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", "location": "query", "type": "string" } @@ -952,7 +952,7 @@ "type": "string" }, "requestId": { - "description": "Optional. A unique id used to identify the request. If the server receives two DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "description": "Optional. A unique ID used to identify the request. If the server receives two DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", "location": "query", "type": "string" } @@ -1199,7 +1199,7 @@ "type": "string" }, "requestId": { - "description": "Optional. A unique id used to identify the request. If the server receives two UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "description": "Optional. A unique ID used to identify the request. If the server receives two UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", "location": "query", "type": "string" }, @@ -1221,6 +1221,47 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "repair": { + "description": "Repairs a cluster.", + "flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair", + "httpMethod": "POST", + "id": "dataproc.projects.regions.clusters.repair", + "parameterOrder": [ + "projectId", + "region", + "clusterName" + ], + "parameters": { + "clusterName": { + "description": "Required. The cluster name.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "Required. The Dataproc region in which to handle the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair", + "request": { + "$ref": "RepairClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:setIamPolicy", @@ -2260,7 +2301,7 @@ } } }, - "revision": "20210622", + "revision": "20210625", "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -4149,6 +4190,21 @@ }, "type": "object" }, + "RepairClusterRequest": { + "description": "A request to repair a cluster.", + "id": "RepairClusterRequest", + "properties": { + "clusterUuid": { + "description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", + "type": "string" + }, + "requestId": { + "description": "Optional. A unique ID used to identify the request. If the server receives two RepairClusterRequests with the same ID, the second request is ignored, and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "type": "string" + } + }, + "type": "object" + }, "ReservationAffinity": { "description": "Reservation Affinity for consuming Zonal reservation.", "id": "ReservationAffinity", @@ -4429,7 +4485,7 @@ "type": "string" }, "requestId": { - "description": "Optional. A unique id used to identify the request. If the server receives two StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "description": "Optional. A unique ID used to identify the request. 
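The new `clusters.repair` method carries its RepairClusterRequest in the request body rather than as query parameters; a sketch that also follows the requestId recommendation from the descriptions above (project, region, cluster, and UUID values are placeholders):

```python
# Sketch only: repair a Dataproc cluster. clusterUuid guards against the
# cluster being recreated under the same name, and a UUID requestId makes
# the call safe to retry, per the RepairClusterRequest description above.
import uuid

from googleapiclient.discovery import build

dataproc = build("dataproc", "v1")

operation = dataproc.projects().regions().clusters().repair(
    projectId="my-project",
    region="us-central1",
    clusterName="my-cluster",
    body={
        "clusterUuid": "12345678-1234-5678-1234-567812345678",  # placeholder
        "requestId": str(uuid.uuid4()),
    },
).execute()
```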
If the server receives two StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", "type": "string" } }, @@ -4471,7 +4527,7 @@ "type": "string" }, "requestId": { - "description": "Optional. A unique id used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", + "description": "Optional. A unique ID used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/dataproc.v1beta2.json b/googleapiclient/discovery_cache/documents/dataproc.v1beta2.json index 4d3c8f9868d..50705785789 100644 --- a/googleapiclient/discovery_cache/documents/dataproc.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/dataproc.v1beta2.json @@ -2291,7 +2291,7 @@ } } }, - "revision": "20210622", + "revision": "20210625", "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v2.json b/googleapiclient/discovery_cache/documents/dialogflow.v2.json index 3ef948b962b..54b0cf0c5b0 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v2.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v2.json @@ -6915,7 +6915,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AudioInput": { @@ -11063,7 +11063,7 @@ "id": "GoogleCloudDialogflowV2Environment", "properties": { "agentVersion": { - "description": "Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/`", + "description": "Optional. The agent version loaded into this environment. 
Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/`", "type": "string" }, "description": { diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json b/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json index 44dcf66fd3b..1240aac29bf 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json @@ -7247,7 +7247,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AudioInput": { @@ -13174,7 +13174,7 @@ "id": "GoogleCloudDialogflowV2beta1Environment", "properties": { "agentVersion": { - "description": "Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/`", + "description": "Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/`", "type": "string" }, "description": { diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v3.json b/googleapiclient/discovery_cache/documents/dialogflow.v3.json index c989a11f17e..646eb445ca0 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v3.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v3.json @@ -3553,7 +3553,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3Agent": { diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json b/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json index cf9f409eb2d..bbd056ff68e 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json @@ -3553,7 +3553,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AudioInput": { diff --git a/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json b/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json index 5a2b4bf64b5..3031b5e5df9 100644 --- a/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json +++ b/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json @@ -184,7 +184,7 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": "https://digitalassetlinks.googleapis.com/", "schemas": { "AndroidAppAsset": { diff --git a/googleapiclient/discovery_cache/documents/dlp.v2.json b/googleapiclient/discovery_cache/documents/dlp.v2.json index f7b8dcaeab6..f3b08071b06 100644 --- a/googleapiclient/discovery_cache/documents/dlp.v2.json +++ b/googleapiclient/discovery_cache/documents/dlp.v2.json @@ -3412,7 +3412,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://dlp.googleapis.com/", "schemas": { "GooglePrivacyDlpV2Action": { diff --git a/googleapiclient/discovery_cache/documents/dns.v1.json b/googleapiclient/discovery_cache/documents/dns.v1.json index f9914dcf684..ff3fdb3b2d3 100644 --- a/googleapiclient/discovery_cache/documents/dns.v1.json +++ b/googleapiclient/discovery_cache/documents/dns.v1.json @@ -1245,7 +1245,7 @@ } } }, - "revision": "20210614", + "revision": "20210618", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { diff --git 
a/googleapiclient/discovery_cache/documents/dns.v1beta2.json b/googleapiclient/discovery_cache/documents/dns.v1beta2.json index 3908fd1722a..2ec916b3d7d 100644 --- a/googleapiclient/discovery_cache/documents/dns.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/dns.v1beta2.json @@ -1740,7 +1740,7 @@ } } }, - "revision": "20210614", + "revision": "20210618", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { diff --git a/googleapiclient/discovery_cache/documents/documentai.v1.json b/googleapiclient/discovery_cache/documents/documentai.v1.json index 58394454d94..51d02e11239 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1.json @@ -664,7 +664,7 @@ } } }, - "revision": "20210617", + "revision": "20210625", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3CommonOperationMetadata": { @@ -825,6 +825,23 @@ }, "type": "object" }, + "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionMetadata": { + "description": "Metadata message associated with the ExportProcessorVersion operation.", + "id": "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionMetadata", + "properties": { + "commonMetadata": { + "$ref": "GoogleCloudDocumentaiUiv1beta3CommonOperationMetadata", + "description": "The common metadata about the operation." + } + }, + "type": "object" + }, + "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionResponse": { + "description": "Response message associated with the ExportProcessorVersion operation.", + "id": "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionResponse", + "properties": {}, + "type": "object" + }, "GoogleCloudDocumentaiUiv1beta3SetDefaultProcessorVersionMetadata": { "description": "The long running operation metadata for set default processor version method.", "id": "GoogleCloudDocumentaiUiv1beta3SetDefaultProcessorVersionMetadata", diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta2.json b/googleapiclient/discovery_cache/documents/documentai.v1beta2.json index 4346329bed4..51283e5c5ab 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1beta2.json @@ -292,7 +292,7 @@ } } }, - "revision": "20210617", + "revision": "20210625", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3CommonOperationMetadata": { @@ -453,6 +453,23 @@ }, "type": "object" }, + "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionMetadata": { + "description": "Metadata message associated with the ExportProcessorVersion operation.", + "id": "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionMetadata", + "properties": { + "commonMetadata": { + "$ref": "GoogleCloudDocumentaiUiv1beta3CommonOperationMetadata", + "description": "The common metadata about the operation." 
+ } + }, + "type": "object" + }, + "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionResponse": { + "description": "Response message associated with the ExportProcessorVersion operation.", + "id": "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionResponse", + "properties": {}, + "type": "object" + }, "GoogleCloudDocumentaiUiv1beta3SetDefaultProcessorVersionMetadata": { "description": "The long running operation metadata for set default processor version method.", "id": "GoogleCloudDocumentaiUiv1beta3SetDefaultProcessorVersionMetadata", diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json index a070e1580e2..97add676e83 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json @@ -601,7 +601,7 @@ } } }, - "revision": "20210617", + "revision": "20210625", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3CommonOperationMetadata": { @@ -762,6 +762,23 @@ }, "type": "object" }, + "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionMetadata": { + "description": "Metadata message associated with the ExportProcessorVersion operation.", + "id": "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionMetadata", + "properties": { + "commonMetadata": { + "$ref": "GoogleCloudDocumentaiUiv1beta3CommonOperationMetadata", + "description": "The common metadata about the operation." + } + }, + "type": "object" + }, + "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionResponse": { + "description": "Response message associated with the ExportProcessorVersion operation.", + "id": "GoogleCloudDocumentaiUiv1beta3ExportProcessorVersionResponse", + "properties": {}, + "type": "object" + }, "GoogleCloudDocumentaiUiv1beta3SetDefaultProcessorVersionMetadata": { "description": "The long running operation metadata for set default processor version method.", "id": "GoogleCloudDocumentaiUiv1beta3SetDefaultProcessorVersionMetadata", diff --git a/googleapiclient/discovery_cache/documents/domains.v1alpha2.json b/googleapiclient/discovery_cache/documents/domains.v1alpha2.json index d125a6928b6..fa335bafb62 100644 --- a/googleapiclient/discovery_cache/documents/domains.v1alpha2.json +++ b/googleapiclient/discovery_cache/documents/domains.v1alpha2.json @@ -721,7 +721,7 @@ } } }, - "revision": "20210607", + "revision": "20210629", "rootUrl": "https://domains.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/domains.v1beta1.json b/googleapiclient/discovery_cache/documents/domains.v1beta1.json index 5a0099ff9a7..e013ffce9fe 100644 --- a/googleapiclient/discovery_cache/documents/domains.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/domains.v1beta1.json @@ -721,7 +721,7 @@ } } }, - "revision": "20210607", + "revision": "20210629", "rootUrl": "https://domains.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/domainsrdap.v1.json b/googleapiclient/discovery_cache/documents/domainsrdap.v1.json index 85101459fdf..8e13209f257 100644 --- a/googleapiclient/discovery_cache/documents/domainsrdap.v1.json +++ b/googleapiclient/discovery_cache/documents/domainsrdap.v1.json @@ -289,7 +289,7 @@ } } }, - "revision": "20210628", + "revision": "20210701", "rootUrl": "https://domainsrdap.googleapis.com/", "schemas": { "HttpBody": { diff --git 
a/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json b/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json index 006a05f2c21..35ea4cd8ec9 100644 --- a/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json +++ b/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json @@ -399,7 +399,7 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": "https://doubleclicksearch.googleapis.com/", "schemas": { "Availability": { diff --git a/googleapiclient/discovery_cache/documents/drive.v2.json b/googleapiclient/discovery_cache/documents/drive.v2.json index ceea2c352e7..cc44aeeb654 100644 --- a/googleapiclient/discovery_cache/documents/drive.v2.json +++ b/googleapiclient/discovery_cache/documents/drive.v2.json @@ -38,7 +38,7 @@ "description": "Manages files in Drive including uploading, downloading, searching, detecting changes, and updating sharing permissions.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/drive/", - "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/nNoyhOtI184Q7f0384VnTm4hV3w\"", + "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/Dp2bPJMp8Fmx1gY2Dos9-f3_ieQ\"", "icons": { "x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png", "x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png" @@ -1030,7 +1030,7 @@ "parameters": { "maxResults": { "default": "10", - "description": "Maximum number of shared drives to return.", + "description": "Maximum number of shared drives to return per page.", "format": "int32", "location": "query", "maximum": "100", @@ -3527,7 +3527,7 @@ } } }, - "revision": "20210621", + "revision": "20210628", "rootUrl": "https://www.googleapis.com/", "schemas": { "About": { diff --git a/googleapiclient/discovery_cache/documents/drive.v3.json b/googleapiclient/discovery_cache/documents/drive.v3.json index 1a0103986c6..4df05f6c4af 100644 --- a/googleapiclient/discovery_cache/documents/drive.v3.json +++ b/googleapiclient/discovery_cache/documents/drive.v3.json @@ -35,7 +35,7 @@ "description": "Manages files in Drive including uploading, downloading, searching, detecting changes, and updating sharing permissions.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/drive/", - "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/RDW9kL-wYxpc9TU9SMLoLWEfo9w\"", + "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/2Z0xD6zo7lxnqrSN16DOJp_HnVE\"", "icons": { "x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png", "x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png" @@ -680,7 +680,7 @@ "parameters": { "pageSize": { "default": "10", - "description": "Maximum number of shared drives to return.", + "description": "Maximum number of shared drives to return per page.", "format": "int32", "location": "query", "maximum": "100", @@ -2191,7 +2191,7 @@ } } }, - "revision": "20210621", + "revision": "20210628", "rootUrl": "https://www.googleapis.com/", "schemas": { "About": { diff --git a/googleapiclient/discovery_cache/documents/driveactivity.v2.json b/googleapiclient/discovery_cache/documents/driveactivity.v2.json index 62e16fa8958..cdd390add7d 100644 --- a/googleapiclient/discovery_cache/documents/driveactivity.v2.json +++ b/googleapiclient/discovery_cache/documents/driveactivity.v2.json @@ -132,7 +132,7 @@ } } }, - "revision": "20210623", + "revision": "20210629", "rootUrl": "https://driveactivity.googleapis.com/", "schemas": { "Action": { diff --git a/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json 
b/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json index a133d80e407..3adb7870ac0 100644 --- a/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json +++ b/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json @@ -850,7 +850,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://essentialcontacts.googleapis.com/", "schemas": { "GoogleCloudEssentialcontactsV1ComputeContactsResponse": { diff --git a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json index ecfe815ed74..77d0611be28 100644 --- a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json @@ -304,7 +304,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://factchecktools.googleapis.com/", "schemas": { "GoogleFactcheckingFactchecktoolsV1alpha1Claim": { diff --git a/googleapiclient/discovery_cache/documents/fcm.v1.json b/googleapiclient/discovery_cache/documents/fcm.v1.json index 0d456bb1d95..e9157db2df6 100644 --- a/googleapiclient/discovery_cache/documents/fcm.v1.json +++ b/googleapiclient/discovery_cache/documents/fcm.v1.json @@ -142,7 +142,7 @@ } } }, - "revision": "20210621", + "revision": "20210628", "rootUrl": "https://fcm.googleapis.com/", "schemas": { "AndroidConfig": { diff --git a/googleapiclient/discovery_cache/documents/file.v1.json b/googleapiclient/discovery_cache/documents/file.v1.json index 76875cb2ded..3fb3dc42ae1 100644 --- a/googleapiclient/discovery_cache/documents/file.v1.json +++ b/googleapiclient/discovery_cache/documents/file.v1.json @@ -672,7 +672,7 @@ } } }, - "revision": "20210524", + "revision": "20210629", "rootUrl": "https://file.googleapis.com/", "schemas": { "Backup": { diff --git a/googleapiclient/discovery_cache/documents/file.v1beta1.json b/googleapiclient/discovery_cache/documents/file.v1beta1.json index ad31c27fab7..c881a791c76 100644 --- a/googleapiclient/discovery_cache/documents/file.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/file.v1beta1.json @@ -672,7 +672,7 @@ } } }, - "revision": "20210524", + "revision": "20210629", "rootUrl": "https://file.googleapis.com/", "schemas": { "Backup": { diff --git a/googleapiclient/discovery_cache/documents/firebase.v1beta1.json b/googleapiclient/discovery_cache/documents/firebase.v1beta1.json index ecc13fe7c07..598247c5e5b 100644 --- a/googleapiclient/discovery_cache/documents/firebase.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/firebase.v1beta1.json @@ -1121,7 +1121,7 @@ } } }, - "revision": "20210625", + "revision": "20210702", "rootUrl": "https://firebase.googleapis.com/", "schemas": { "AddFirebaseRequest": { diff --git a/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json b/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json index 55c2543aed8..06b249868c4 100644 --- a/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json +++ b/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json @@ -863,7 +863,7 @@ } } }, - "revision": "20210621", + "revision": "20210625", "rootUrl": "https://firebaseappcheck.googleapis.com/", "schemas": { "GoogleFirebaseAppcheckV1betaAppAttestChallengeResponse": { @@ -980,7 +980,7 @@ "type": "object" }, "GoogleFirebaseAppcheckV1betaDeviceCheckConfig": { - "description": "An app's DeviceCheck configuration object. 
This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck.", + "description": "An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. It also controls certain properties of the returned App Check token, such as its ttl. Note that the Team ID registered with your app is used as part of the validation process. Please register it via the Firebase Console or programmatically via the [Firebase Management Service](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects.iosApps/patch).", "id": "GoogleFirebaseAppcheckV1betaDeviceCheckConfig", "properties": { "keyId": { @@ -1048,7 +1048,7 @@ "type": "object" }, "GoogleFirebaseAppcheckV1betaExchangeAppAttestAttestationResponse": { - "description": "Response message for ExchangeAppAttestAttestation", + "description": "Response message for ExchangeAppAttestAttestation and ExchangeAppAttestDebugAttestation", "id": "GoogleFirebaseAppcheckV1betaExchangeAppAttestAttestationResponse", "properties": { "artifact": { @@ -1206,7 +1206,7 @@ "type": "object" }, "GoogleFirebaseAppcheckV1betaRecaptchaConfig": { - "description": "An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3.", + "description": "An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. It also controls certain properties of the returned App Check token, such as its ttl.", "id": "GoogleFirebaseAppcheckV1betaRecaptchaConfig", "properties": { "name": { diff --git a/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json b/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json index acc10f4bbad..e0d3a85cb6a 100644 --- a/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json +++ b/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json @@ -317,7 +317,7 @@ } } }, - "revision": "20210625", + "revision": "20210702", "rootUrl": "https://firebasedatabase.googleapis.com/", "schemas": { "DatabaseInstance": { diff --git a/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json b/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json index dd98de46bc3..d630a8aa1f0 100644 --- a/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json +++ b/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json @@ -224,7 +224,7 @@ } } }, - "revision": "20210621", + "revision": "20210629", "rootUrl": "https://firebasedynamiclinks.googleapis.com/", "schemas": { "AnalyticsInfo": { diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v1.json b/googleapiclient/discovery_cache/documents/firebaseml.v1.json index bd2ac050e52..3f854b35aac 100644 --- a/googleapiclient/discovery_cache/documents/firebaseml.v1.json +++ b/googleapiclient/discovery_cache/documents/firebaseml.v1.json @@ -204,7 +204,7 @@ } } }, - "revision": "20210623", + "revision": "20210628", "rootUrl": "https://firebaseml.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json b/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json index 1b7d1ba7fd2..76b6deb55bb 100644 --- a/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json +++ 
b/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json @@ -318,7 +318,7 @@ } } }, - "revision": "20210623", + "revision": "20210628", "rootUrl": "https://firebaseml.googleapis.com/", "schemas": { "DownloadModelResponse": { diff --git a/googleapiclient/discovery_cache/documents/fitness.v1.json b/googleapiclient/discovery_cache/documents/fitness.v1.json index b507a7b517d..d76c0c9ee8d 100644 --- a/googleapiclient/discovery_cache/documents/fitness.v1.json +++ b/googleapiclient/discovery_cache/documents/fitness.v1.json @@ -831,7 +831,7 @@ } } }, - "revision": "20210623", + "revision": "20210630", "rootUrl": "https://fitness.googleapis.com/", "schemas": { "AggregateBucket": { diff --git a/googleapiclient/discovery_cache/documents/games.v1.json b/googleapiclient/discovery_cache/documents/games.v1.json index 581092856cf..dd220193bb5 100644 --- a/googleapiclient/discovery_cache/documents/games.v1.json +++ b/googleapiclient/discovery_cache/documents/games.v1.json @@ -1224,7 +1224,7 @@ } } }, - "revision": "20210614", + "revision": "20210624", "rootUrl": "https://games.googleapis.com/", "schemas": { "AchievementDefinition": { diff --git a/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json b/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json index d75e3cb8d6b..b67ea15c788 100644 --- a/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json +++ b/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json @@ -439,7 +439,7 @@ } } }, - "revision": "20210614", + "revision": "20210624", "rootUrl": "https://gamesconfiguration.googleapis.com/", "schemas": { "AchievementConfiguration": { diff --git a/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json b/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json index cfa0a09e7c3..fe70970ef46 100644 --- a/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json +++ b/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json @@ -471,7 +471,7 @@ } } }, - "revision": "20210614", + "revision": "20210624", "rootUrl": "https://gamesmanagement.googleapis.com/", "schemas": { "AchievementResetAllResponse": { diff --git a/googleapiclient/discovery_cache/documents/gameservices.v1.json b/googleapiclient/discovery_cache/documents/gameservices.v1.json index 0980bcaed52..2583d85a33b 100644 --- a/googleapiclient/discovery_cache/documents/gameservices.v1.json +++ b/googleapiclient/discovery_cache/documents/gameservices.v1.json @@ -1312,7 +1312,7 @@ } } }, - "revision": "20210610", + "revision": "20210622", "rootUrl": "https://gameservices.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/gameservices.v1beta.json b/googleapiclient/discovery_cache/documents/gameservices.v1beta.json index 68074f60cc6..23f910e5b1f 100644 --- a/googleapiclient/discovery_cache/documents/gameservices.v1beta.json +++ b/googleapiclient/discovery_cache/documents/gameservices.v1beta.json @@ -1357,7 +1357,7 @@ } } }, - "revision": "20210610", + "revision": "20210622", "rootUrl": "https://gameservices.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1.json b/googleapiclient/discovery_cache/documents/gkehub.v1.json index 23544da5214..ae7c060d098 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1.json @@ -905,7 +905,7 @@ } 
} }, - "revision": "20210622", + "revision": "20210625", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json index c0b3885b981..ba8b1864796 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json @@ -670,7 +670,7 @@ } } }, - "revision": "20210622", + "revision": "20210625", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json index c2621bb1129..6b5fdfb1e64 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json @@ -652,7 +652,7 @@ } } }, - "revision": "20210622", + "revision": "20210625", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json index 9b60b89a650..c74b5aa5907 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json @@ -670,7 +670,7 @@ } } }, - "revision": "20210622", + "revision": "20210625", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json index dc8da7710f8..b9794340187 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json @@ -706,7 +706,7 @@ } } }, - "revision": "20210622", + "revision": "20210625", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/gmail.v1.json b/googleapiclient/discovery_cache/documents/gmail.v1.json index 4c6c49247bb..e5af8305088 100644 --- a/googleapiclient/discovery_cache/documents/gmail.v1.json +++ b/googleapiclient/discovery_cache/documents/gmail.v1.json @@ -2682,7 +2682,7 @@ } } }, - "revision": "20210614", + "revision": "20210628", "rootUrl": "https://gmail.googleapis.com/", "schemas": { "AutoForwarding": { diff --git a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json index a760f2df2da..19310c5bc09 100644 --- a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json +++ b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json @@ -265,7 +265,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://gmailpostmastertools.googleapis.com/", "schemas": { "DeliveryError": { diff --git a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json index c8a65464b46..fc46ad6787e 100644 --- a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json @@ -265,7 +265,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://gmailpostmastertools.googleapis.com/", "schemas": { "DeliveryError": { diff --git 
a/googleapiclient/discovery_cache/documents/groupsmigration.v1.json b/googleapiclient/discovery_cache/documents/groupsmigration.v1.json index fa59e07c6f6..3ec8875867e 100644 --- a/googleapiclient/discovery_cache/documents/groupsmigration.v1.json +++ b/googleapiclient/discovery_cache/documents/groupsmigration.v1.json @@ -146,7 +146,7 @@ } } }, - "revision": "20210609", + "revision": "20210625", "rootUrl": "https://groupsmigration.googleapis.com/", "schemas": { "Groups": { diff --git a/googleapiclient/discovery_cache/documents/groupssettings.v1.json b/googleapiclient/discovery_cache/documents/groupssettings.v1.json index 5ca6b8333fd..544129ccd9b 100644 --- a/googleapiclient/discovery_cache/documents/groupssettings.v1.json +++ b/googleapiclient/discovery_cache/documents/groupssettings.v1.json @@ -152,7 +152,7 @@ } } }, - "revision": "20210622", + "revision": "20210624", "rootUrl": "https://www.googleapis.com/", "schemas": { "Groups": { @@ -191,6 +191,10 @@ "description": "When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true.", "type": "string" }, + "default_sender": { + "description": "Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group", + "type": "string" + }, "description": { "description": "Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/iamcredentials.v1.json b/googleapiclient/discovery_cache/documents/iamcredentials.v1.json index f0abba0f97e..7cfbb47c101 100644 --- a/googleapiclient/discovery_cache/documents/iamcredentials.v1.json +++ b/googleapiclient/discovery_cache/documents/iamcredentials.v1.json @@ -226,7 +226,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://iamcredentials.googleapis.com/", "schemas": { "GenerateAccessTokenRequest": { diff --git a/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json b/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json index b101f2b0cde..d2bda17eaff 100644 --- a/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json @@ -342,7 +342,7 @@ } } }, - "revision": "20210625", + "revision": "20210705", "rootUrl": "https://ideahub.googleapis.com/", "schemas": { "GoogleSearchIdeahubV1alphaAvailableLocale": { diff --git a/googleapiclient/discovery_cache/documents/indexing.v3.json b/googleapiclient/discovery_cache/documents/indexing.v3.json index 57742b8a752..b6f337dbe29 100644 --- a/googleapiclient/discovery_cache/documents/indexing.v3.json +++ b/googleapiclient/discovery_cache/documents/indexing.v3.json @@ -149,7 +149,7 @@ } } }, - "revision": "20210623", + "revision": "20210624", "rootUrl": "https://indexing.googleapis.com/", "schemas": { "PublishUrlNotificationResponse": { diff --git a/googleapiclient/discovery_cache/documents/libraryagent.v1.json b/googleapiclient/discovery_cache/documents/libraryagent.v1.json index 5751c38741c..73812e074de 100644 --- a/googleapiclient/discovery_cache/documents/libraryagent.v1.json +++ 
b/googleapiclient/discovery_cache/documents/libraryagent.v1.json @@ -279,7 +279,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://libraryagent.googleapis.com/", "schemas": { "GoogleExampleLibraryagentV1Book": { diff --git a/googleapiclient/discovery_cache/documents/licensing.v1.json b/googleapiclient/discovery_cache/documents/licensing.v1.json index 56c4f9b365d..005650dfa95 100644 --- a/googleapiclient/discovery_cache/documents/licensing.v1.json +++ b/googleapiclient/discovery_cache/documents/licensing.v1.json @@ -400,7 +400,7 @@ } } }, - "revision": "20210626", + "revision": "20210703", "rootUrl": "https://licensing.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json b/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json index 446335b53ef..e850856b2c6 100644 --- a/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json +++ b/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json @@ -312,7 +312,7 @@ } } }, - "revision": "20210617", + "revision": "20210618", "rootUrl": "https://lifesciences.googleapis.com/", "schemas": { "Accelerator": { diff --git a/googleapiclient/discovery_cache/documents/localservices.v1.json b/googleapiclient/discovery_cache/documents/localservices.v1.json index a4de34dd414..3a9bb6b1884 100644 --- a/googleapiclient/discovery_cache/documents/localservices.v1.json +++ b/googleapiclient/discovery_cache/documents/localservices.v1.json @@ -250,7 +250,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://localservices.googleapis.com/", "schemas": { "GoogleAdsHomeservicesLocalservicesV1AccountReport": { diff --git a/googleapiclient/discovery_cache/documents/logging.v2.json b/googleapiclient/discovery_cache/documents/logging.v2.json index a0ce6dcd7f0..d0caebec14a 100644 --- a/googleapiclient/discovery_cache/documents/logging.v2.json +++ b/googleapiclient/discovery_cache/documents/logging.v2.json @@ -728,6 +728,83 @@ } } } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "logging.billingAccounts.locations.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "logging.billingAccounts.locations.operations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the operation's parent resource.", + "location": "path", + "pattern": "^billingAccounts/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}/operations", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } } } }, @@ -806,6 +883,38 @@ } } }, + "operations": { + "methods": { + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v2/billingAccounts/{billingAccountsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "logging.billingAccounts.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^billingAccounts/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } + }, "sinks": { "methods": { "create": { @@ -1021,6 +1130,25 @@ }, "entries": { "methods": { + "copy": { + "description": "Copies a set of log entries from a logging bucket to a Cloud Storage bucket.", + "flatPath": "v2/entries:copy", + "httpMethod": "POST", + "id": "logging.entries.copy", + "parameterOrder": [], + "parameters": {}, + "path": "v2/entries:copy", + "request": { + "$ref": "CopyLogEntriesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, "list": { "description": "Lists log entries. Use this method to retrieve log entries that originated from a project/folder/organization/billing account. 
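The hunks above add the standard long-running-operations methods (cancel, get, list) to the Logging v2 surface under billing account locations; the same trio is repeated below for folders, generic locations, organizations, and projects. A minimal sketch of driving the new methods through google-api-python-client, assuming Application Default Credentials and placeholder billing-account and location IDs:

```python
# Sketch only: exercises logging.billingAccounts.locations.operations.list
# and .cancel as defined in this diff. IDs below are hypothetical.
from googleapiclient.discovery import build

logging_service = build("logging", "v2")

# List long-running operations under a billing account location.
parent = "billingAccounts/012345-ABCDEF/locations/global"
ops = (
    logging_service.billingAccounts()
    .locations()
    .operations()
    .list(name=parent, pageSize=10)
    .execute()
)

for op in ops.get("operations", []):
    print(op["name"], "done" if op.get("done") else "running")

# Best-effort cancellation of the first operation; the request body is an
# empty CancelOperationRequest.
if ops.get("operations"):
    logging_service.billingAccounts().locations().operations().cancel(
        name=ops["operations"][0]["name"], body={}
    ).execute()
```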
For ways to export log entries, see Exporting Logs (https://cloud.google.com/logging/docs/export).", "flatPath": "v2/entries:list", @@ -1847,6 +1975,111 @@ } } } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "logging.folders.locations.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "logging.folders.locations.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "logging.folders.locations.operations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the operation's parent resource.", + "location": "path", + "pattern": "^folders/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}/operations", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } } } }, @@ -2576,6 +2809,111 @@ } } } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "logging.locations.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "logging.locations.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "logging.locations.operations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the operation's parent resource.", + "location": "path", + "pattern": "^[^/]+/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}/operations", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } } } }, @@ -3316,42 +3654,147 @@ "https://www.googleapis.com/auth/logging.read" ] }, - "patch": { - "description": "Updates a view. This method replaces the following fields in the existing view with values from the new view: filter.", - "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", - "httpMethod": "PATCH", - "id": "logging.organizations.locations.buckets.views.patch", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The full resource name of the view to update \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id/views/my-view-id\".", - "location": "path", - "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", - "required": true, - "type": "string" - }, - "updateMask": { - "description": "Optional. Field mask that specifies the fields in view that need an update. 
A field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", - "format": "google-fieldmask", - "location": "query", - "type": "string" - } - }, - "path": "v2/{+name}", - "request": { - "$ref": "LogView" - }, - "response": { - "$ref": "LogView" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ] + "patch": { + "description": "Updates a view. This method replaces the following fields in the existing view with values from the new view: filter.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/views/{viewsId}", + "httpMethod": "PATCH", + "id": "logging.organizations.locations.buckets.views.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The full resource name of the view to update \"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]\" Example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id/views/my-view-id\".", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. Field mask that specifies the fields in view that need an update. A field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMaskExample: updateMask=filter.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "LogView" + }, + "response": { + "$ref": "LogView" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + } + } + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "logging.organizations.locations.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "logging.organizations.locations.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "logging.organizations.locations.operations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the operation's parent resource.", + "location": "path", + "pattern": "^organizations/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" } - } + }, + "path": "v2/{+name}/operations", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] } } } @@ -4246,6 +4689,111 @@ } } } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "logging.projects.locations.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "logging.projects.locations.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "logging.projects.locations.operations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the operation's parent resource.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}/operations", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ] + } + } } } }, @@ -4934,7 +5482,7 @@ } } }, - "revision": "20210608", + "revision": "20210625", "rootUrl": "https://logging.googleapis.com/", "schemas": { "BigQueryOptions": { @@ -4972,6 +5520,12 @@ }, "type": "object" }, + "CancelOperationRequest": { + "description": "The request message for Operations.CancelOperation.", + "id": "CancelOperationRequest", + "properties": {}, + "type": "object" + }, "CmekSettings": { "description": "Describes the customer-managed encryption key (CMEK) settings associated with a project, folder, organization, billing account, or flexible resource.Note: CMEK for the Logs Router can currently only be configured for GCP organizations. 
Once configured, it applies to all projects and folders in the GCP organization.See Enabling CMEK for Logs Router (https://cloud.google.com/logging/docs/routing/managed-encryption) for more information.", "id": "CmekSettings", @@ -4993,6 +5547,93 @@ }, "type": "object" }, + "CopyLogEntriesMetadata": { + "description": "Metadata for CopyLogEntries long running operations.", + "id": "CopyLogEntriesMetadata", + "properties": { + "cancellationRequested": { + "description": "Identifies whether the user has requested cancellation of the operation.", + "type": "boolean" + }, + "endTime": { + "description": "The end time of an operation.", + "format": "google-datetime", + "type": "string" + }, + "progress": { + "description": "Estimated progress of the operation (0 - 100%).", + "format": "int32", + "type": "integer" + }, + "request": { + "$ref": "CopyLogEntriesRequest", + "description": "CopyLogEntries RPC request." + }, + "startTime": { + "description": "The create time of an operation.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "State of an operation.", + "enum": [ + "OPERATION_STATE_UNSPECIFIED", + "OPERATION_STATE_SCHEDULED", + "OPERATION_STATE_WAITING_FOR_PERMISSIONS", + "OPERATION_STATE_RUNNING", + "OPERATION_STATE_SUCCEEDED", + "OPERATION_STATE_FAILED", + "OPERATION_STATE_CANCELLED" + ], + "enumDescriptions": [ + "Should not be used.", + "The operation is scheduled.", + "Waiting for necessary permissions.", + "The operation is running.", + "The operation was completed successfully.", + "The operation failed.", + "The operation was cancelled by the user." + ], + "type": "string" + }, + "writerIdentity": { + "description": "The IAM identity of a service account that must be granted access to the destination. If the service account is not granted permission to the destination within an hour, the operation will be cancelled. Example: \"serviceAccount:foo@bar.com\"", + "type": "string" + } + }, + "type": "object" + }, + "CopyLogEntriesRequest": { + "description": "The parameters to CopyLogEntries.", + "id": "CopyLogEntriesRequest", + "properties": { + "destination": { + "description": "Required. Destination to which to copy logs.", + "type": "string" + }, + "filter": { + "description": "Optional. A filter specifying which log entries to copy. The filter must be no more than 20k characters. An empty filter matches all log entries.", + "type": "string" + }, + "name": { + "description": "Required. Bucket from which to copy logs. e.g. \"projects/my-project/locations/my-location/buckets/my-source-bucket", + "type": "string" + } + }, + "type": "object" + }, + "CopyLogEntriesResponse": { + "description": "Response type for CopyLogEntries long running operations.", + "id": "CopyLogEntriesResponse", + "properties": { + "logEntriesCopiedCount": { + "description": "Number of log entries copied.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
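Taken together with the entries:copy method added earlier in this file, the CopyLogEntriesRequest/Metadata/Response schemas above describe a complete long-running copy job. A minimal sketch, assuming Application Default Credentials; the project and bucket names are placeholders, and the `storage.googleapis.com/...` destination format is an assumption taken from the Cloud Logging documentation rather than from this discovery document:

```python
# Sketch only: starts a logging.entries.copy long-running operation and
# polls it with the projects.locations.operations.get method added in
# this change. Assumes the returned operation name is project-scoped.
import time

from googleapiclient.discovery import build

logging_service = build("logging", "v2")

request_body = {
    # CopyLogEntriesRequest.name: source log bucket (hypothetical).
    "name": "projects/my-project/locations/global/buckets/my-source-bucket",
    # CopyLogEntriesRequest.filter: optional, empty matches everything.
    "filter": "severity>=ERROR",
    # CopyLogEntriesRequest.destination: Cloud Storage bucket (assumed format).
    "destination": "storage.googleapis.com/my-archive-bucket",
}
operation = logging_service.entries().copy(body=request_body).execute()

# Poll the Operation until done, per the Operation schema above.
while not operation.get("done"):
    time.sleep(10)
    operation = (
        logging_service.projects()
        .locations()
        .operations()
        .get(name=operation["name"])
        .execute()
    )

if "error" in operation:
    # Operation.error is a google.rpc.Status message.
    print("Copy failed:", operation["error"].get("message"))
else:
    # CopyLogEntriesResponse.logEntriesCopiedCount (int64 as string).
    print("Copied", operation["response"].get("logEntriesCopiedCount"), "entries")
```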
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}.", "id": "Empty", @@ -5323,6 +5964,24 @@ }, "type": "object" }, + "ListOperationsResponse": { + "description": "The response message for Operations.ListOperations.", + "id": "ListOperationsResponse", + "properties": { + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + }, + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "items": { + "$ref": "Operation" + }, + "type": "array" + } + }, + "type": "object" + }, "ListSinksResponse": { "description": "Result returned from ListSinks.", "id": "ListSinksResponse", @@ -6100,6 +6759,41 @@ }, "type": "object" }, + "Operation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "id": "Operation", + "properties": { + "done": { + "description": "If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.", + "type": "boolean" + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.", + "type": "string" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", + "type": "object" + } + }, + "type": "object" + }, "RequestLog": { "description": "Complete log information about a single HTTP request to an App Engine application.", "id": "RequestLog", @@ -6289,6 +6983,33 @@ }, "type": "object" }, + "Status": { + "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). 
Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors).", + "id": "Status", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "type": "object" + }, "SuppressionInfo": { "description": "Information about entries that were omitted from the session.", "id": "SuppressionInfo", diff --git a/googleapiclient/discovery_cache/documents/memcache.v1.json b/googleapiclient/discovery_cache/documents/memcache.v1.json index 1f6e90fad3b..fc220ff04eb 100644 --- a/googleapiclient/discovery_cache/documents/memcache.v1.json +++ b/googleapiclient/discovery_cache/documents/memcache.v1.json @@ -528,7 +528,7 @@ } } }, - "revision": "20210615", + "revision": "20210629", "rootUrl": "https://memcache.googleapis.com/", "schemas": { "ApplyParametersRequest": { diff --git a/googleapiclient/discovery_cache/documents/memcache.v1beta2.json b/googleapiclient/discovery_cache/documents/memcache.v1beta2.json index d21773c8565..d30db022871 100644 --- a/googleapiclient/discovery_cache/documents/memcache.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/memcache.v1beta2.json @@ -556,7 +556,7 @@ } } }, - "revision": "20210615", + "revision": "20210629", "rootUrl": "https://memcache.googleapis.com/", "schemas": { "ApplyParametersRequest": { diff --git a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json index 94881fa8f9c..ae6f10c43a2 100644 --- a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json @@ -986,7 +986,7 @@ } } }, - "revision": "20210615", + "revision": "20210622", "rootUrl": "https://metastore.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/metastore.v1beta.json b/googleapiclient/discovery_cache/documents/metastore.v1beta.json index e0b95e9d0fb..aecb452d6cd 100644 --- a/googleapiclient/discovery_cache/documents/metastore.v1beta.json +++ b/googleapiclient/discovery_cache/documents/metastore.v1beta.json @@ -986,7 +986,7 @@ } } }, - "revision": "20210615", + "revision": "20210622", "rootUrl": "https://metastore.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/monitoring.v1.json b/googleapiclient/discovery_cache/documents/monitoring.v1.json index 7302bc65d33..b8367bc0d53 100644 --- a/googleapiclient/discovery_cache/documents/monitoring.v1.json +++ b/googleapiclient/discovery_cache/documents/monitoring.v1.json @@ -285,7 +285,7 @@ } } }, - "revision": "20210618", + "revision": "20210702", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -389,6 +389,17 
@@ }, "type": "object" }, + "AlertChart": { + "description": "A chart that displays alert policy data.", + "id": "AlertChart", + "properties": { + "name": { + "description": "Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] ", + "type": "string" + } + }, + "type": "object" + }, "Axis": { "description": "A chart axis.", "id": "Axis", @@ -1217,6 +1228,10 @@ "description": "Widget contains a single dashboard component and configuration of how to present the component in the dashboard.", "id": "Widget", "properties": { + "alertChart": { + "$ref": "AlertChart", + "description": "A chart of alert policy data." + }, "blank": { "$ref": "Empty", "description": "A blank space." diff --git a/googleapiclient/discovery_cache/documents/monitoring.v3.json b/googleapiclient/discovery_cache/documents/monitoring.v3.json index 2c2f4e53265..7c2d2180151 100644 --- a/googleapiclient/discovery_cache/documents/monitoring.v3.json +++ b/googleapiclient/discovery_cache/documents/monitoring.v3.json @@ -2541,7 +2541,7 @@ } } }, - "revision": "20210618", + "revision": "20210702", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -2649,6 +2649,10 @@ "description": "A description of the conditions under which some aspect of your system is considered to be \"unhealthy\" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/).", "id": "AlertPolicy", "properties": { + "alertStrategy": { + "$ref": "AlertStrategy", + "description": "Control over how this alert policy's notification channels are notified." + }, "combiner": { "description": "How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.", "enum": [ @@ -2717,6 +2721,17 @@ }, "type": "object" }, + "AlertStrategy": { + "description": "Control over how the notification channels in notification_channels are notified when this alert fires.", + "id": "AlertStrategy", + "properties": { + "notificationRateLimit": { + "$ref": "NotificationRateLimit", + "description": "Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented." + } + }, + "type": "object" + }, "AppEngine": { "description": "App Engine service. Learn more at https://cloud.google.com/appengine.", "id": "AppEngine", @@ -2965,6 +2980,10 @@ "$ref": "MetricAbsence", "description": "A condition that checks that a time series continues to receive new data points." }, + "conditionMatchedLog": { + "$ref": "LogMatch", + "description": "A condition that checks for log messages matching given constraints. If set, no other conditions can be present." + }, "conditionMonitoringQueryLanguage": { "$ref": "MonitoringQueryLanguageCondition", "description": "A condition that uses the Monitoring Query Language to define alerts." @@ -3911,6 +3930,24 @@ }, "type": "object" }, + "LogMatch": { + "description": "A condition type that checks whether a log message from any project monitored by the alert policy\u2019s workspace satisfies the given filter.", + "id": "LogMatch", + "properties": { + "filter": { + "description": "Required. A logs-based filter. 
See Advanced Logs Queries for how this filter should be constructed.", + "type": "string" + }, + "labelExtractors": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples.", + "type": "object" + } + }, + "type": "object" + }, "MeshIstio": { "description": "Istio service scoped to an Istio mesh. Anthos clusters running ASM < 1.6.8 will have their services ingested as this type.", "id": "MeshIstio", @@ -4453,6 +4490,18 @@ }, "type": "object" }, + "NotificationRateLimit": { + "description": "Control over the rate of notifications sent to this alert policy's notification channels.", + "id": "NotificationRateLimit", + "properties": { + "period": { + "description": "Not more than one notification per period.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, "OperationMetadata": { "description": "Contains metadata for longrunning operation for the edit Metrics Scope endpoints.", "id": "OperationMetadata", @@ -4711,6 +4760,13 @@ "telemetry": { "$ref": "Telemetry", "description": "Configuration for how to query telemetry on a Service." + }, + "userLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels which have been used to annotate the service. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. 
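The Monitoring v3 additions above (the alertStrategy field on AlertPolicy, the LogMatch condition type, and NotificationRateLimit) combine into log-based alerting. A minimal sketch, assuming a hypothetical project and notification channel; note that alertPolicies.create itself is part of the pre-existing Monitoring v3 surface, not of this diff:

```python
# Sketch only: creates an alert policy that fires on matching log entries,
# throttled to at most one notification per period, using the new
# conditionMatchedLog, alertStrategy, and notificationRateLimit fields.
from googleapiclient.discovery import build

monitoring = build("monitoring", "v3")

policy = {
    "displayName": "Error logs alert",
    "combiner": "OR",
    "conditions": [
        {
            "displayName": "Matching error log entries",
            # LogMatch: a logs-based filter plus optional label extractors
            # (same syntax as logs-based metric valueExtractors).
            "conditionMatchedLog": {
                "filter": 'severity="ERROR" AND resource.type="gce_instance"',
                "labelExtractors": {
                    "instance": "EXTRACT(resource.labels.instance_id)"
                },
            },
        }
    ],
    # AlertStrategy is required for LogMatch conditions; the rate limit
    # allows at most one notification per period.
    "alertStrategy": {"notificationRateLimit": {"period": "300s"}},
    "notificationChannels": [
        "projects/my-project/notificationChannels/1234567890"  # hypothetical
    ],
}

created = (
    monitoring.projects()
    .alertPolicies()
    .create(name="projects/my-project", body=policy)
    .execute()
)
print(created["name"])
```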
For labels which do not have a semantic value, the empty string may be supplied for the label value.", + "type": "object" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json b/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json index 5005cccdac9..a56654efe80 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json @@ -530,7 +530,7 @@ } } }, - "revision": "20210626", + "revision": "20210703", "rootUrl": "https://mybusinessaccountmanagement.googleapis.com/", "schemas": { "AcceptInvitationRequest": { diff --git a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json index 4ea95aec0b7..e64606bd972 100644 --- a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json @@ -194,7 +194,7 @@ } } }, - "revision": "20210626", + "revision": "20210703", "rootUrl": "https://mybusinesslodging.googleapis.com/", "schemas": { "Accessibility": { diff --git a/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json b/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json index e428a8c5f47..bbde98eb68b 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json @@ -281,7 +281,7 @@ } } }, - "revision": "20210626", + "revision": "20210703", "rootUrl": "https://mybusinessplaceactions.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json b/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json index b204b6cece5..5624a3e801d 100644 --- a/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json @@ -12,7 +12,7 @@ "baseUrl": "https://networkconnectivity.googleapis.com/", "batchPath": "batch", "canonicalName": "networkconnectivity", - "description": "The Network Connectivity API will be home to various services which provide information pertaining to network connectivity.", + "description": "The Network Connectivity API provides access to Network Connectivity Center.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/network-connectivity/docs", "fullyEncodeReservedExpansion": true, @@ -1029,7 +1029,7 @@ } } }, - "revision": "20210616", + "revision": "20210701", "rootUrl": "https://networkconnectivity.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/networkmanagement.v1.json b/googleapiclient/discovery_cache/documents/networkmanagement.v1.json index 22dbdc6fb82..f473527bbc3 100644 --- a/googleapiclient/discovery_cache/documents/networkmanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/networkmanagement.v1.json @@ -591,7 +591,7 @@ } } }, - "revision": "20210616", + "revision": "20210624", "rootUrl": "https://networkmanagement.googleapis.com/", "schemas": { "AbortInfo": { diff --git a/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json index ac81004d8d0..10bc94a3f64 100644 --- 
a/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json @@ -591,7 +591,7 @@ } } }, - "revision": "20210616", + "revision": "20210624", "rootUrl": "https://networkmanagement.googleapis.com/", "schemas": { "AbortInfo": { diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json index 5b3e8a001e3..32f9bc8fa74 100644 --- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json +++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json @@ -339,7 +339,7 @@ } } }, - "revision": "20210618", + "revision": "20210628", "rootUrl": "https://ondemandscanning.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json index 48530114004..2bc429cdaec 100644 --- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json @@ -339,7 +339,7 @@ } } }, - "revision": "20210618", + "revision": "20210628", "rootUrl": "https://ondemandscanning.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/orgpolicy.v2.json b/googleapiclient/discovery_cache/documents/orgpolicy.v2.json index 0dd555c7627..f868e4d824d 100644 --- a/googleapiclient/discovery_cache/documents/orgpolicy.v2.json +++ b/googleapiclient/discovery_cache/documents/orgpolicy.v2.json @@ -751,7 +751,7 @@ } } }, - "revision": "20210625", + "revision": "20210701", "rootUrl": "https://orgpolicy.googleapis.com/", "schemas": { "GoogleCloudOrgpolicyV2Constraint": { diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1.json b/googleapiclient/discovery_cache/documents/osconfig.v1.json index 0b575317151..b52b2844942 100644 --- a/googleapiclient/discovery_cache/documents/osconfig.v1.json +++ b/googleapiclient/discovery_cache/documents/osconfig.v1.json @@ -476,7 +476,7 @@ } } }, - "revision": "20210618", + "revision": "20210626", "rootUrl": "https://osconfig.googleapis.com/", "schemas": { "AptSettings": { diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json b/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json index 0f734172b2e..c2ba5e25097 100644 --- a/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json @@ -610,7 +610,7 @@ } } }, - "revision": "20210618", + "revision": "20210626", "rootUrl": "https://osconfig.googleapis.com/", "schemas": { "CVSSv3": { diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1beta.json b/googleapiclient/discovery_cache/documents/osconfig.v1beta.json index 1f2c029399d..483d4803ea5 100644 --- a/googleapiclient/discovery_cache/documents/osconfig.v1beta.json +++ b/googleapiclient/discovery_cache/documents/osconfig.v1beta.json @@ -599,7 +599,7 @@ } } }, - "revision": "20210618", + "revision": "20210626", "rootUrl": "https://osconfig.googleapis.com/", "schemas": { "AptRepository": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1.json b/googleapiclient/discovery_cache/documents/oslogin.v1.json index 9e442751ca9..b2272a47cca 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1.json @@ -306,7 +306,7 @@ } } }, - 
"revision": "20210618", + "revision": "20210626", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json index ca76963ae72..e640a357bdc 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json @@ -374,7 +374,7 @@ } } }, - "revision": "20210618", + "revision": "20210626", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json index 0dd8d370c97..24802ddcd90 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json @@ -344,7 +344,7 @@ } } }, - "revision": "20210618", + "revision": "20210626", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json b/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json index b9bfa05c222..1d25cbcf63b 100644 --- a/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json +++ b/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json @@ -193,7 +193,7 @@ } } }, - "revision": "20210625", + "revision": "20210701", "rootUrl": "https://pagespeedonline.googleapis.com/", "schemas": { "AuditRefs": { diff --git a/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json b/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json index 2626eedda3e..0ab5219ef90 100644 --- a/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json +++ b/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json @@ -180,7 +180,7 @@ "subscriptions": { "methods": { "cancel": { - "description": "Used by partners to cancel a subscription service by the end of the current billing cycle for their customers. It should be called directly by the partner using service accounts.", + "description": "Used by partners to cancel a subscription service either immediately or by the end of the current billing cycle for their customers. It should be called directly by the partner using service accounts.", "flatPath": "v1/partners/{partnersId}/subscriptions/{subscriptionsId}:cancel", "httpMethod": "POST", "id": "paymentsresellersubscription.partners.subscriptions.cancel", @@ -366,7 +366,7 @@ } } }, - "revision": "20210627", + "revision": "20210705", "rootUrl": "https://paymentsresellersubscription.googleapis.com/", "schemas": { "GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionRequest": { @@ -675,6 +675,21 @@ "description": "Required. Identifier of the end-user in partner\u2019s system. The value is restricted to 63 ASCII characters at the maximum.", "type": "string" }, + "processingState": { + "description": "Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle).", + "enum": [ + "PROCESSING_STATE_UNSPECIFIED", + "PROCESSING_STATE_CANCELLING", + "PROCESSING_STATE_RECURRING" + ], + "enumDescriptions": [ + "The processing state is unspecified.", + "The subscription is being cancelled.", + "The subscription is recurring." 
+ ], + "readOnly": true, + "type": "string" + }, "products": { "description": "Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'.", "items": { @@ -699,7 +714,7 @@ "description": "Required. The location that the service is provided as indicated by the partner." }, "state": { - "description": "Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle).", + "description": "Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle).", "enum": [ "STATE_UNSPECIFIED", "STATE_CREATED", @@ -733,7 +748,7 @@ "type": "object" }, "GoogleCloudPaymentsResellerSubscriptionV1SubscriptionCancellationDetails": { - "description": "Describes the details of a cancelled subscription.", + "description": "Describes the details of a cancelled or cancelling subscription.", "id": "GoogleCloudPaymentsResellerSubscriptionV1SubscriptionCancellationDetails", "properties": { "reason": { diff --git a/googleapiclient/discovery_cache/documents/people.v1.json b/googleapiclient/discovery_cache/documents/people.v1.json index b5354128b3f..edd38864c34 100644 --- a/googleapiclient/discovery_cache/documents/people.v1.json +++ b/googleapiclient/discovery_cache/documents/people.v1.json @@ -1154,7 +1154,7 @@ } } }, - "revision": "20210624", + "revision": "20210701", "rootUrl": "https://people.googleapis.com/", "schemas": { "Address": { diff --git a/googleapiclient/discovery_cache/documents/playablelocations.v3.json b/googleapiclient/discovery_cache/documents/playablelocations.v3.json index 88419563aef..0c99f13e452 100644 --- a/googleapiclient/discovery_cache/documents/playablelocations.v3.json +++ b/googleapiclient/discovery_cache/documents/playablelocations.v3.json @@ -146,7 +146,7 @@ } } }, - "revision": "20210626", + "revision": "20210703", "rootUrl": "https://playablelocations.googleapis.com/", "schemas": { "GoogleMapsPlayablelocationsV3Impression": { diff --git a/googleapiclient/discovery_cache/documents/playcustomapp.v1.json b/googleapiclient/discovery_cache/documents/playcustomapp.v1.json index 6b759678db2..1dc06681076 100644 --- a/googleapiclient/discovery_cache/documents/playcustomapp.v1.json +++ b/googleapiclient/discovery_cache/documents/playcustomapp.v1.json @@ -158,7 +158,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://playcustomapp.googleapis.com/", "schemas": { "CustomApp": { diff --git a/googleapiclient/discovery_cache/documents/policysimulator.v1.json b/googleapiclient/discovery_cache/documents/policysimulator.v1.json index 269ab9e8d67..f1733f3f09a 100644 --- a/googleapiclient/discovery_cache/documents/policysimulator.v1.json +++ b/googleapiclient/discovery_cache/documents/policysimulator.v1.json @@ -493,7 +493,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://policysimulator.googleapis.com/", "schemas": { "GoogleCloudPolicysimulatorV1AccessStateDiff": { diff --git a/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json b/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json index 6314a5e4f7a..2f74fde0c0d 100644 --- a/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json +++ 
b/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json @@ -493,7 +493,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://policysimulator.googleapis.com/", "schemas": { "GoogleCloudPolicysimulatorV1Replay": { diff --git a/googleapiclient/discovery_cache/documents/privateca.v1.json b/googleapiclient/discovery_cache/documents/privateca.v1.json index f113bfc2a0e..d3a10e710fd 100644 --- a/googleapiclient/discovery_cache/documents/privateca.v1.json +++ b/googleapiclient/discovery_cache/documents/privateca.v1.json @@ -1590,7 +1590,7 @@ } } }, - "revision": "20210621", + "revision": "20210624", "rootUrl": "https://privateca.googleapis.com/", "schemas": { "AccessUrls": { diff --git a/googleapiclient/discovery_cache/documents/privateca.v1beta1.json b/googleapiclient/discovery_cache/documents/privateca.v1beta1.json index 2717b5dedff..fb1e253ec7f 100644 --- a/googleapiclient/discovery_cache/documents/privateca.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/privateca.v1beta1.json @@ -1254,7 +1254,7 @@ } } }, - "revision": "20210621", + "revision": "20210624", "rootUrl": "https://privateca.googleapis.com/", "schemas": { "AccessUrls": { diff --git a/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json b/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json index 511618819b0..3c554aec52b 100644 --- a/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json @@ -2484,7 +2484,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://prod-tt-sasportal.googleapis.com/", "schemas": { "SasPortalAssignment": { diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1.json b/googleapiclient/discovery_cache/documents/pubsub.v1.json index 2cfc0f697a9..89afc202a14 100644 --- a/googleapiclient/discovery_cache/documents/pubsub.v1.json +++ b/googleapiclient/discovery_cache/documents/pubsub.v1.json @@ -1424,7 +1424,7 @@ } } }, - "revision": "20210614", + "revision": "20210622", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json b/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json index a4b96106485..c8a0b548588 100644 --- a/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json +++ b/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json @@ -457,7 +457,7 @@ } } }, - "revision": "20210614", + "revision": "20210622", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json b/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json index 244e5447a37..03e8c96fc39 100644 --- a/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json @@ -724,7 +724,7 @@ } } }, - "revision": "20210614", + "revision": "20210622", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { diff --git a/googleapiclient/discovery_cache/documents/pubsublite.v1.json b/googleapiclient/discovery_cache/documents/pubsublite.v1.json index f779beae6c7..baa26387151 100644 --- a/googleapiclient/discovery_cache/documents/pubsublite.v1.json +++ b/googleapiclient/discovery_cache/documents/pubsublite.v1.json @@ -690,7 +690,7 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": 
"https://pubsublite.googleapis.com/", "schemas": { "Capacity": { diff --git a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json index 8af6f4f0efd..b55b3fa93a7 100644 --- a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json +++ b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json @@ -1140,7 +1140,7 @@ } } }, - "revision": "20210626", + "revision": "20210701", "rootUrl": "https://realtimebidding.googleapis.com/", "schemas": { "ActivatePretargetingConfigRequest": { diff --git a/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json b/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json index 5f73a672e98..07455ed06a6 100644 --- a/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json @@ -234,7 +234,7 @@ } } }, - "revision": "20210624", + "revision": "20210701", "rootUrl": "https://realtimebidding.googleapis.com/", "schemas": { "ActivateBiddingFunctionRequest": { diff --git a/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json b/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json index 27fc0ea974e..a7bfb2a7574 100644 --- a/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json +++ b/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json @@ -375,7 +375,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://recaptchaenterprise.googleapis.com/", "schemas": { "GoogleCloudRecaptchaenterpriseV1AndroidKeySettings": { diff --git a/googleapiclient/discovery_cache/documents/recommender.v1.json b/googleapiclient/discovery_cache/documents/recommender.v1.json index f711bfb2ca5..4de99e0473c 100644 --- a/googleapiclient/discovery_cache/documents/recommender.v1.json +++ b/googleapiclient/discovery_cache/documents/recommender.v1.json @@ -1178,7 +1178,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://recommender.googleapis.com/", "schemas": { "GoogleCloudRecommenderV1CostProjection": { diff --git a/googleapiclient/discovery_cache/documents/recommender.v1beta1.json b/googleapiclient/discovery_cache/documents/recommender.v1beta1.json index 49edc02f1df..ce8b9193e4d 100644 --- a/googleapiclient/discovery_cache/documents/recommender.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/recommender.v1beta1.json @@ -1178,7 +1178,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://recommender.googleapis.com/", "schemas": { "GoogleCloudRecommenderV1beta1CostProjection": { diff --git a/googleapiclient/discovery_cache/documents/redis.v1.json b/googleapiclient/discovery_cache/documents/redis.v1.json index c629224db5c..2f8beeca8e1 100644 --- a/googleapiclient/discovery_cache/documents/redis.v1.json +++ b/googleapiclient/discovery_cache/documents/redis.v1.json @@ -624,7 +624,7 @@ } } }, - "revision": "20210616", + "revision": "20210624", "rootUrl": "https://redis.googleapis.com/", "schemas": { "Empty": { @@ -768,7 +768,7 @@ "type": "object" }, "Instance": { - "description": "A Google Cloud Redis instance.", + "description": "A Google Cloud Redis instance. 
next id = 30", "id": "Instance", "properties": { "alternativeLocationId": { diff --git a/googleapiclient/discovery_cache/documents/redis.v1beta1.json b/googleapiclient/discovery_cache/documents/redis.v1beta1.json index c6adf234262..9f1a84b01df 100644 --- a/googleapiclient/discovery_cache/documents/redis.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/redis.v1beta1.json @@ -624,7 +624,7 @@ } } }, - "revision": "20210616", + "revision": "20210624", "rootUrl": "https://redis.googleapis.com/", "schemas": { "Empty": { @@ -692,33 +692,40 @@ "id": "GoogleCloudCommonOperationMetadata", "properties": { "apiVersion": { - "description": "[Output only] API version used to start the operation.", + "description": "Output only. API version used to start the operation.", + "readOnly": true, "type": "string" }, "cancelRequested": { - "description": "[Output only] Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + "description": "Output only. Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + "readOnly": true, "type": "boolean" }, "createTime": { - "description": "[Output only] The time the operation was created.", + "description": "Output only. The time the operation was created.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "endTime": { - "description": "[Output only] The time the operation finished running.", + "description": "Output only. The time the operation finished running.", "format": "google-datetime", + "readOnly": true, "type": "string" }, "statusDetail": { - "description": "[Output only] Human-readable status of the operation, if any.", + "description": "Output only. Human-readable status of the operation, if any.", + "readOnly": true, "type": "string" }, "target": { - "description": "[Output only] Server-defined resource path for the target of the operation.", + "description": "Output only. Server-defined resource path for the target of the operation.", + "readOnly": true, "type": "string" }, "verb": { - "description": "[Output only] Name of the verb executed by the operation.", + "description": "Output only. Name of the verb executed by the operation.", + "readOnly": true, "type": "string" } }, @@ -768,7 +775,7 @@ "type": "object" }, "Instance": { - "description": "A Google Cloud Redis instance.", + "description": "A Google Cloud Redis instance. 
next id = 30", "id": "Instance", "properties": { "alternativeLocationId": { diff --git a/googleapiclient/discovery_cache/documents/remotebuildexecution.v1.json b/googleapiclient/discovery_cache/documents/remotebuildexecution.v1.json index 74cc9160ac6..f3e24e94a37 100644 --- a/googleapiclient/discovery_cache/documents/remotebuildexecution.v1.json +++ b/googleapiclient/discovery_cache/documents/remotebuildexecution.v1.json @@ -307,7 +307,7 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": "https://remotebuildexecution.googleapis.com/", "schemas": { "BuildBazelRemoteExecutionV2Action": { diff --git a/googleapiclient/discovery_cache/documents/remotebuildexecution.v1alpha.json b/googleapiclient/discovery_cache/documents/remotebuildexecution.v1alpha.json index 2292cf78d59..d019d372754 100644 --- a/googleapiclient/discovery_cache/documents/remotebuildexecution.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/remotebuildexecution.v1alpha.json @@ -432,7 +432,7 @@ } } }, - "revision": "20210623", + "revision": "20210629", "rootUrl": "https://admin-remotebuildexecution.googleapis.com/", "schemas": { "BuildBazelRemoteExecutionV2Action": { diff --git a/googleapiclient/discovery_cache/documents/remotebuildexecution.v2.json b/googleapiclient/discovery_cache/documents/remotebuildexecution.v2.json index 38f94dcff38..9d63e516ab9 100644 --- a/googleapiclient/discovery_cache/documents/remotebuildexecution.v2.json +++ b/googleapiclient/discovery_cache/documents/remotebuildexecution.v2.json @@ -447,7 +447,7 @@ } } }, - "revision": "20210622", + "revision": "20210629", "rootUrl": "https://remotebuildexecution.googleapis.com/", "schemas": { "BuildBazelRemoteExecutionV2Action": { diff --git a/googleapiclient/discovery_cache/documents/reseller.v1.json b/googleapiclient/discovery_cache/documents/reseller.v1.json index a6a4a7dec1b..81d3bb30495 100644 --- a/googleapiclient/discovery_cache/documents/reseller.v1.json +++ b/googleapiclient/discovery_cache/documents/reseller.v1.json @@ -631,7 +631,7 @@ } } }, - "revision": "20210623", + "revision": "20210629", "rootUrl": "https://reseller.googleapis.com/", "schemas": { "Address": { diff --git a/googleapiclient/discovery_cache/documents/resourcesettings.v1.json b/googleapiclient/discovery_cache/documents/resourcesettings.v1.json index 4cf1b15d6d8..2ad6c13c682 100644 --- a/googleapiclient/discovery_cache/documents/resourcesettings.v1.json +++ b/googleapiclient/discovery_cache/documents/resourcesettings.v1.json @@ -499,7 +499,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://resourcesettings.googleapis.com/", "schemas": { "GoogleCloudResourcesettingsV1ListSettingsResponse": { diff --git a/googleapiclient/discovery_cache/documents/retail.v2.json b/googleapiclient/discovery_cache/documents/retail.v2.json index 014b4a83d2b..40d07a0dc72 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2.json +++ b/googleapiclient/discovery_cache/documents/retail.v2.json @@ -706,7 +706,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -1125,7 +1125,7 @@ "additionalProperties": { "type": "any" }, - "description": "Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. 
If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request level control and adjust prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request level control and adjust prediction results based on product category.", + "description": "Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of a product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request-level control and adjusts prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request-level control and adjusts prediction results based on product category.", "type": "object" }, "userEvent": { @@ -1624,11 +1624,11 @@ "type": "boolean" }, "ipAddress": { - "description": "The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. \"104.133.9.80\") or an IPv6 address (e.g. \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", + "description": "The end user's IP address. Required for getting SearchResponse.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. \"104.133.9.80\") or an IPv6 address (e.g. \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", "type": "string" }, "userAgent": { - "description": "User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. 
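The params map documented above (and mirrored in the v2alpha/v2beta hunks that follow) is passed through the predict request body verbatim. A minimal sketch, assuming the standard retail v2 predict surface (projects.locations.catalogs.placements.predict) and that response fields follow the shapes named in the description; the placement path and user event are placeholders:

from googleapiclient.discovery import build

retail = build("retail", "v2")
placement = ("projects/example-project/locations/global/catalogs/default_catalog"
             "/placements/recently_viewed_default")  # placeholder placement path

body = {
    "userEvent": {  # minimal illustrative event
        "eventType": "detail-page-view",
        "visitorId": "visitor-123",
    },
    "params": {
        "returnScore": True,                        # expose per-product 'score' in results.metadata
        "strictFiltering": False,                   # fall back to popular products if the filter empties results
        "priceRerankLevel": "low-price-reranking",  # one of the four documented rerank levels
        "diversityLevel": "auto-diversity",         # one of the five documented diversity levels
    },
}

response = (retail.projects().locations().catalogs().placements()
            .predict(placement=placement, body=body).execute())
for result in response.get("results", []):
    # 'score' lands in results.metadata when returnScore is true, per the description above.
    print(result.get("id"), result.get("metadata", {}).get("score"))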
This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", + "description": "User agent as included in the HTTP header. Required for getting SearchResponse.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", "type": "string" }, "userId": { diff --git a/googleapiclient/discovery_cache/documents/retail.v2alpha.json b/googleapiclient/discovery_cache/documents/retail.v2alpha.json index fa90052d1f5..afa91a7597a 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/retail.v2alpha.json @@ -706,7 +706,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -1320,7 +1320,7 @@ "additionalProperties": { "type": "any" }, - "description": "Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request level control and adjust prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request level control and adjust prediction results based on product category.", + "description": "Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of a product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request-level control and adjusts prediction results based on product price. * `diversityLevel`: String. Default empty. 
If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request-level control and adjusts prediction results based on product category.", "type": "object" }, "userEvent": { @@ -1819,11 +1819,11 @@ "type": "boolean" }, "ipAddress": { - "description": "The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. \"104.133.9.80\") or an IPv6 address (e.g. \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", + "description": "The end user's IP address. Required for getting SearchResponse.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. \"104.133.9.80\") or an IPv6 address (e.g. \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", "type": "string" }, "userAgent": { - "description": "User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", + "description": "User agent as included in the HTTP header. Required for getting SearchResponse.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", "type": "string" }, "userId": { diff --git a/googleapiclient/discovery_cache/documents/retail.v2beta.json b/googleapiclient/discovery_cache/documents/retail.v2beta.json index 5aa8b9a27eb..b77947ddb10 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2beta.json +++ b/googleapiclient/discovery_cache/documents/retail.v2beta.json @@ -706,7 +706,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -1515,7 +1515,7 @@ "additionalProperties": { "type": "any" }, - "description": "Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. 
This gives request level control and adjust prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request level control and adjust prediction results based on product category.", + "description": "Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of a product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request-level control and adjusts prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request-level control and adjusts prediction results based on product category.", "type": "object" }, "userEvent": { @@ -2014,11 +2014,11 @@ "type": "boolean" }, "ipAddress": { - "description": "The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. \"104.133.9.80\") or an IPv6 address (e.g. \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", + "description": "The end user's IP address. Required for getting SearchResponse.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. \"104.133.9.80\") or an IPv6 address (e.g. \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", "type": "string" }, "userAgent": { - "description": "User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", + "description": "User agent as included in the HTTP header. Required for getting SearchResponse.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. 
This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set.", "type": "string" }, "userId": { diff --git a/googleapiclient/discovery_cache/documents/run.v1.json b/googleapiclient/discovery_cache/documents/run.v1.json index 3bfd31cc6c0..36e5de5436a 100644 --- a/googleapiclient/discovery_cache/documents/run.v1.json +++ b/googleapiclient/discovery_cache/documents/run.v1.json @@ -12,7 +12,7 @@ "baseUrl": "https://run.googleapis.com/", "batchPath": "batch", "canonicalName": "Cloud Run", - "description": "Deploy and manage user provided container images that scale automatically based on incoming requets. The Cloud Run Admin API follows the Knative Serving API specification.", + "description": "Deploy and manage user provided container images that scale automatically based on incoming requests. The Cloud Run Admin API follows the Knative Serving API specification.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/run/", "fullyEncodeReservedExpansion": true, @@ -1736,7 +1736,7 @@ } } }, - "revision": "20210618", + "revision": "20210628", "rootUrl": "https://run.googleapis.com/", "schemas": { "Addressable": { diff --git a/googleapiclient/discovery_cache/documents/run.v1alpha1.json b/googleapiclient/discovery_cache/documents/run.v1alpha1.json index 2fb1adad844..3da6051fc8e 100644 --- a/googleapiclient/discovery_cache/documents/run.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/run.v1alpha1.json @@ -12,7 +12,7 @@ "baseUrl": "https://run.googleapis.com/", "batchPath": "batch", "canonicalName": "Cloud Run", - "description": "Deploy and manage user provided container images that scale automatically based on incoming requets. The Cloud Run Admin API follows the Knative Serving API specification.", + "description": "Deploy and manage user provided container images that scale automatically based on incoming requests. The Cloud Run Admin API follows the Knative Serving API specification.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/run/", "fullyEncodeReservedExpansion": true, @@ -268,32 +268,11 @@ } } }, - "revision": "20210618", + "revision": "20210628", "rootUrl": "https://run.googleapis.com/", "schemas": { - "Capabilities": { - "description": "Adds and removes POSIX capabilities from running containers.", - "id": "Capabilities", - "properties": { - "add": { - "description": "Added capabilities +optional", - "items": { - "type": "string" - }, - "type": "array" - }, - "drop": { - "description": "Removed capabilities +optional", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, "ConfigMapEnvSource": { - "description": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", + "description": "Not supported by Cloud Run ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", "id": "ConfigMapEnvSource", "properties": { "localObjectReference": { @@ -301,22 +280,22 @@ "description": "This field should not be used directly as it is meant to be inlined directly into the message. Use the \"name\" field instead." 
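The run.v1alpha1 hunks that follow prune the container schemas down to what Cloud Run actually honors. For orientation, a minimal sketch of reading a deployed service's container spec through the Knative-style v1 Admin API, assuming the conventional namespaces-based resource path and a regional endpoint; project, region, and service names are placeholders:

from googleapiclient.discovery import build

# Cloud Run's Admin API is served from per-region endpoints.
run = build("run", "v1",
            client_options={"api_endpoint": "https://us-central1-run.googleapis.com"})

svc = run.namespaces().services().get(
    name="namespaces/example-project/services/example-service").execute()

# The Container fields being re-documented in this hunk live under the
# Knative Service's template spec.
container = svc["spec"]["template"]["spec"]["containers"][0]
print(container["image"])        # must reference Container Registry or Artifact Registry
print(container.get("ports"))    # Cloud Run accepts at most one port entry
print(container.get("env", []))  # literal values or Secret Manager references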
}, "name": { - "description": "Cloud Run fully managed: not supported Cloud Run for Anthos: supported The ConfigMap to select from.", + "description": "The ConfigMap to select from.", "type": "string" }, "optional": { - "description": "Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the ConfigMap must be defined +optional", + "description": "(Optional) Specify whether the ConfigMap must be defined", "type": "boolean" } }, "type": "object" }, "ConfigMapKeySelector": { - "description": "Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key from a ConfigMap.", + "description": "Not supported by Cloud Run Selects a key from a ConfigMap.", "id": "ConfigMapKeySelector", "properties": { "key": { - "description": "Cloud Run fully managed: not supported Cloud Run on GKE: supported The key to select.", + "description": "The key to select.", "type": "string" }, "localObjectReference": { @@ -324,27 +303,27 @@ "description": "This field should not be used directly as it is meant to be inlined directly into the message. Use the \"name\" field instead." }, "name": { - "description": "Cloud Run fully managed: not supported Cloud Run on GKE: supported The ConfigMap to select from.", + "description": "The ConfigMap to select from.", "type": "string" }, "optional": { - "description": "Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the ConfigMap or its key must be defined +optional", + "description": "(Optional) Specify whether the ConfigMap or its key must be defined", "type": "boolean" } }, "type": "object" }, "ConfigMapVolumeSource": { - "description": "Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths.", + "description": "Not supported by Cloud Run Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths.", "id": "ConfigMapVolumeSource", "properties": { "defaultMode": { - "description": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "description": "(Optional) Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", "format": "int32", "type": "integer" }, "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional.", + "description": "(Optional) If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional.", "items": { "$ref": "KeyToPath" }, @@ -355,7 +334,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the Secret or its keys must be defined.", + "description": "(Optional) Specify whether the Secret or its keys must be defined.", "type": "boolean" } }, @@ -366,55 +345,50 @@ "id": "Container", "properties": { "args": { - "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional", + "description": "(Optional) Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "items": { "type": "string" }, "type": "array" }, "command": { - "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional", "items": { "type": "string" }, "type": "array" }, "env": { - "description": "List of environment variables to set in the container. Cannot be updated. +optional", + "description": "(Optional) List of environment variables to set in the container.", "items": { "$ref": "EnvVar" }, "type": "array" }, "envFrom": { - "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+optional", + "description": "(Optional) List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "items": { "$ref": "EnvFromSource" }, "type": "array" }, "image": { - "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", + "description": "Only supports containers from Google Container Registry or Artifact Registry URL of the Container image. More info: https://kubernetes.io/docs/concepts/containers/images", "type": "string" }, "imagePullPolicy": { - "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images +optional", + "description": "(Optional) Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", "type": "string" }, - "lifecycle": { - "$ref": "Lifecycle", - "description": "Actions that the management system should take in response to container lifecycle events. Cannot be updated. +optional" - }, "livenessProbe": { "$ref": "Probe", - "description": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional" + "description": "(Optional) Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" }, "name": { - "description": "Name of the container specified as a DNS_LABEL. Each container must have a unique name (DNS_LABEL). Cannot be updated.", + "description": "(Optional) Name of the container specified as a DNS_LABEL. Currently unused in Cloud Run. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names", "type": "string" }, "ports": { - "description": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. +optional", + "description": "(Optional) List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on.", "items": { "$ref": "ContainerPort" }, @@ -422,52 +396,37 @@ }, "readinessProbe": { "$ref": "Probe", - "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional" + "description": "(Optional) Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" }, "resources": { "$ref": "ResourceRequirements", - "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +optional" + "description": "(Optional) Compute Resources required by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" }, "securityContext": { "$ref": "SecurityContext", - "description": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +optional" - }, - "stdin": { - "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. +optional", - "type": "boolean" + "description": "(Optional) Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" }, - "stdinOnce": { - "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false +optional", - "type": "boolean" + "startupProbe": { + "$ref": "Probe", + "description": "(Optional) Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" }, "terminationMessagePath": { - "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. +optional", + "description": "(Optional) Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log.", "type": "string" }, "terminationMessagePolicy": { - "description": "Indicate how the termination message should be populated. 
File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. +optional", + "description": "(Optional) Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", "type": "string" }, - "tty": { - "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. +optional", - "type": "boolean" - }, - "volumeDevices": { - "description": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future. +optional", - "items": { - "$ref": "VolumeDevice" - }, - "type": "array" - }, "volumeMounts": { - "description": "Pod volumes to mount into the container's filesystem. Cannot be updated. +optional", + "description": "(Optional) Volume to mount into the container's filesystem. Only supports SecretVolumeSources. Pod volumes to mount into the container's filesystem.", "items": { "$ref": "VolumeMount" }, "type": "array" }, "workingDir": { - "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. +optional", + "description": "(Optional) Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image.", "type": "string" } }, @@ -478,25 +437,16 @@ "id": "ContainerPort", "properties": { "containerPort": { - "description": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.", - "format": "int32", - "type": "integer" - }, - "hostIP": { - "description": "What host IP to bind the external port to. +optional", - "type": "string" - }, - "hostPort": { - "description": "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. +optional", + "description": "(Optional) Port number the container listens on. This must be a valid port number, 0 < x < 65536.", "format": "int32", "type": "integer" }, "name": { - "description": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. +optional", + "description": "(Optional) If specified, used to specify which protocol to use. Allowed values are \"http1\" and \"h2c\".", "type": "string" }, "protocol": { - "description": "Protocol for port. Must be UDP or TCP. Defaults to \"TCP\". +optional", + "description": "(Optional) Protocol for port. Must be \"TCP\". 
Defaults to \"TCP\".", "type": "string" } }, @@ -509,20 +459,20 @@ "type": "object" }, "EnvFromSource": { - "description": "EnvFromSource represents the source of a set of ConfigMaps", + "description": "Not supported by Cloud Run EnvFromSource represents the source of a set of ConfigMaps", "id": "EnvFromSource", "properties": { "configMapRef": { "$ref": "ConfigMapEnvSource", - "description": "The ConfigMap to select from +optional" + "description": "(Optional) The ConfigMap to select from" }, "prefix": { - "description": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. +optional", + "description": "(Optional) An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", "type": "string" }, "secretRef": { "$ref": "SecretEnvSource", - "description": "The Secret to select from +optional" + "description": "(Optional) The Secret to select from" } }, "type": "object" @@ -536,37 +486,37 @@ "type": "string" }, "value": { - "description": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\". +optional", + "description": "(Optional) Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", "type": "string" }, "valueFrom": { "$ref": "EnvVarSource", - "description": "Cloud Run fully managed: supported Source for the environment variable's value. Only supports secret_key_ref. Cloud Run for Anthos: supported Source for the environment variable's value. Cannot be used if value is not empty. +optional" + "description": "(Optional) Source for the environment variable's value. Only supports secret_key_ref. Source for the environment variable's value. Cannot be used if value is not empty." } }, "type": "object" }, "EnvVarSource": { - "description": "Cloud Run fully managed: not supported Cloud Run on GKE: supported EnvVarSource represents a source for the value of an EnvVar.", + "description": "EnvVarSource represents a source for the value of an EnvVar.", "id": "EnvVarSource", "properties": { "configMapKeyRef": { "$ref": "ConfigMapKeySelector", - "description": "Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key of a ConfigMap. +optional" + "description": "(Optional) Not supported by Cloud Run Selects a key of a ConfigMap." }, "secretKeyRef": { "$ref": "SecretKeySelector", - "description": "Cloud Run fully managed: supported. Selects a key (version) of a secret in Secret Manager. Cloud Run for Anthos: supported. Selects a key of a secret in the pod's namespace. +optional" + "description": "(Optional) Selects a key (version) of a secret in Secret Manager." 
} }, "type": "object" }, "ExecAction": { - "description": "ExecAction describes a \"run in container\" action.", + "description": "Not supported by Cloud Run ExecAction describes a \"run in container\" action.", "id": "ExecAction", "properties": { "command": { - "description": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional", + "description": "(Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", "items": { "type": "string" }, @@ -576,37 +526,33 @@ "type": "object" }, "HTTPGetAction": { - "description": "HTTPGetAction describes an action based on HTTP Get requests.", + "description": "Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests.", "id": "HTTPGetAction", "properties": { "host": { - "description": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead. +optional", + "description": "(Optional) Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", "type": "string" }, "httpHeaders": { - "description": "Custom headers to set in the request. HTTP allows repeated headers. +optional", + "description": "(Optional) Custom headers to set in the request. HTTP allows repeated headers.", "items": { "$ref": "HTTPHeader" }, "type": "array" }, "path": { - "description": "Path to access on the HTTP server. +optional", + "description": "(Optional) Path to access on the HTTP server.", "type": "string" }, - "port": { - "$ref": "IntOrString", - "description": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." - }, "scheme": { - "description": "Scheme to use for connecting to the host. Defaults to HTTP. +optional", + "description": "(Optional) Scheme to use for connecting to the host. Defaults to HTTP.", "type": "string" } }, "type": "object" }, "HTTPHeader": { - "description": "HTTPHeader describes a custom header to be used in HTTP probes", + "description": "Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes", "id": "HTTPHeader", "properties": { "name": { @@ -620,25 +566,6 @@ }, "type": "object" }, - "Handler": { - "description": "Handler defines a specific action that should be taken", - "id": "Handler", - "properties": { - "exec": { - "$ref": "ExecAction", - "description": "One and only one of the following should be specified. Exec specifies the action to take. +optional" - }, - "httpGet": { - "$ref": "HTTPGetAction", - "description": "HTTPGet specifies the http request to perform. +optional" - }, - "tcpSocket": { - "$ref": "TCPSocketAction", - "description": "TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported" - } - }, - "type": "object" - }, "InstanceSpec": { "description": "InstanceSpec is a description of an instance.", "id": "InstanceSpec", @@ -731,27 +658,6 @@ }, "type": "object" }, - "IntOrString": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", - "id": "IntOrString", - "properties": { - "intVal": { - "description": "The int value.", - "format": "int32", - "type": "integer" - }, - "strVal": { - "description": "The string value.", - "type": "string" - }, - "type": { - "description": "The type of the value.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, "Job": { "description": "Job represents the configuration of a single job. A job an immutable resource that references a container image which is run to completion.", "id": "Job", @@ -907,36 +813,21 @@ "id": "KeyToPath", "properties": { "key": { - "description": "Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project.", + "description": "The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. The key to project.", "type": "string" }, "mode": { - "description": "Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional", + "description": "(Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", "format": "int32", "type": "integer" }, "path": { - "description": "Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", + "description": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", "type": "string" } }, "type": "object" }, - "Lifecycle": { - "description": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", - "id": "Lifecycle", - "properties": { - "postStart": { - "$ref": "Handler", - "description": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional" - }, - "preStop": { - "$ref": "Handler", - "description": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. 
The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional" - } - }, - "type": "object" - }, "ListJobsResponse": { "description": "ListJobsResponse is a list of Jobs resources.", "id": "ListJobsResponse", @@ -994,59 +885,59 @@ "type": "object" }, "LocalObjectReference": { - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "description": "Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", "id": "LocalObjectReference", "properties": { "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "(Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" } }, "type": "object" }, "ObjectMeta": { - "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "description": "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "id": "ObjectMeta", "properties": { "annotations": { "additionalProperties": { "type": "string" }, - "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations +optional", + "description": "(Optional) Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "type": "object" }, "clusterName": { - "description": "Not currently supported by Cloud Run. The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. +optional", + "description": "(Optional) Not supported by Cloud Run The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", "type": "string" }, "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional", + "description": "(Optional) CreationTimestamp is a timestamp representing the server time when this object was created. 
It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "format": "google-datetime", "type": "string" }, "deletionGracePeriodSeconds": { - "description": "Not currently supported by Cloud Run. Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. +optional", + "description": "(Optional) Not supported by Cloud Run Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", "format": "int32", "type": "integer" }, "deletionTimestamp": { - "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional", + "description": "(Optional) Not supported by Cloud Run DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. 
If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "format": "google-datetime", "type": "string" }, "finalizers": { - "description": "Not currently supported by Cloud Run. Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +optional +patchStrategy=merge", + "description": "(Optional) Not supported by Cloud Run Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +patchStrategy=merge", "items": { "type": "string" }, "type": "array" }, "generateName": { - "description": "Not currently supported by Cloud Run. GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency +optional string generateName = 2;", + "description": "(Optional) Not supported by Cloud Run GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency string generateName = 2;", "type": "string" }, "generation": { - "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. +optional", + "description": "(Optional) A sequence number representing a specific generation of the desired state. Populated by the system. 
Read-only.", "format": "int32", "type": "integer" }, @@ -1054,7 +945,7 @@ "additionalProperties": { "type": "string" }, - "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels +optional", + "description": "(Optional) Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels", "type": "object" }, "name": { @@ -1066,22 +957,22 @@ "type": "string" }, "ownerReferences": { - "description": "List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. +optional", + "description": "(Optional) Not supported by Cloud Run List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected.", "items": { "$ref": "OwnerReference" }, "type": "array" }, "resourceVersion": { - "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency +optional", + "description": "Optional. An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server or omit the value to disable conflict-detection. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients or omitted. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "type": "string" }, "selfLink": { - "description": "SelfLink is a URL representing this object. Populated by the system. Read-only. +optional string selfLink = 4;", + "description": "(Optional) SelfLink is a URL representing this object. Populated by the system. Read-only. string selfLink = 4;", "type": "string" }, "uid": { - "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids +optional", + "description": "(Optional) UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. 
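The resourceVersion rewrite above now states explicitly that clients either echo the opaque value back unmodified or omit it to disable conflict detection. A sketch of that read-modify-write pattern; get_object and replace_object are hypothetical stand-ins for the corresponding get/replace API calls:

    def add_label(get_object, replace_object, name):
        # Fetch the current object, including metadata.resourceVersion.
        obj = get_object(name)
        obj["metadata"].setdefault("labels", {})["tier"] = "prod"
        # Echoing resourceVersion back unmodified makes the server
        # reject the write if the object changed since the read;
        # deleting the key would disable conflict detection instead.
        return replace_object(name, body=obj)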
More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "type": "string" } }, @@ -1119,52 +1010,49 @@ "type": "object" }, "Probe": { - "description": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "description": "Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", "id": "Probe", "properties": { + "exec": { + "$ref": "ExecAction", + "description": "(Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message." + }, "failureThreshold": { - "description": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional", + "description": "(Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", "format": "int32", "type": "integer" }, - "handler": { - "$ref": "Handler", - "description": "The action taken to determine the health of a container" + "httpGet": { + "$ref": "HTTPGetAction", + "description": "(Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message." }, "initialDelaySeconds": { - "description": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional", + "description": "(Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "format": "int32", "type": "integer" }, "periodSeconds": { - "description": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional", + "description": "(Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", "format": "int32", "type": "integer" }, "successThreshold": { - "description": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional", + "description": "(Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", "format": "int32", "type": "integer" }, + "tcpSocket": { + "$ref": "TCPSocketAction", + "description": "(Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message." + }, "timeoutSeconds": { - "description": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional", + "description": "(Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "format": "int32", "type": "integer" } }, "type": "object" }, - "Quantity": { - "description": "The view model of a single quantity, e.g. \"800 MiB\". 
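The Probe hunk above inlines the action fields from the removed Handler message (exec, httpGet, tcpSocket) directly onto Probe. A sketch of the resulting JSON shape as a Python dict; per the field descriptions, exactly one action should be set, and the command values are illustrative:

    # Probe with the handler fields now inlined. The command is
    # exec'd directly in the container, not run through a shell.
    probe = {
        "exec": {"command": ["/bin/grep", "-q", "ready", "/tmp/status"]},
        "initialDelaySeconds": 5,  # (Optional)
        "periodSeconds": 10,       # (Optional) default 10, minimum 1
        "timeoutSeconds": 1,       # (Optional) default 1, minimum 1
        "failureThreshold": 3,     # (Optional) default 3, minimum 1
        "successThreshold": 1,     # (Optional) must be 1 for liveness
    }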
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto", - "id": "Quantity", - "properties": { - "string": { - "description": "Stringified version of the quantity, e.g., \"800 MiB\".", - "type": "string" - } - }, - "type": "object" - }, "ResourceRequirements": { "description": "ResourceRequirements describes the compute resource requirements.", "id": "ResourceRequirements", @@ -1173,58 +1061,21 @@ "additionalProperties": { "type": "string" }, - "description": "Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go", - "type": "object" - }, - "limitsInMap": { - "additionalProperties": { - "$ref": "Quantity" - }, - "description": "Limits describes the maximum amount of compute resources allowed. This is a temporary field created to migrate away from the map limits field. This is done to become compliant with k8s style API. This field is deprecated in favor of limits field.", + "description": "(Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', and '4'. Setting 4 CPU requires at least 2Gi of memory. Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go", "type": "object" }, "requests": { "additionalProperties": { "type": "string" }, - "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go", - "type": "object" - }, - "requestsInMap": { - "additionalProperties": { - "$ref": "Quantity" - }, - "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. This is a temporary field created to migrate away from the map requests field. This is done to become compliant with k8s style API. This field is deprecated in favor of requests field.", + "description": "(Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1' and '2'. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go", "type": "object" } }, "type": "object" }, - "SELinuxOptions": { - "description": "SELinuxOptions are the labels to be applied to the container", - "id": "SELinuxOptions", - "properties": { - "level": { - "description": "Level is SELinux level label that applies to the container. +optional", - "type": "string" - }, - "role": { - "description": "Role is a SELinux role label that applies to the container. 
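The ResourceRequirements hunk above drops the deprecated limitsInMap/requestsInMap fields (and with them the Quantity schema) and documents the supported keys and CPU values inline. A sketch of a conforming value in the k8s quantity string form the description points to; the specific sizes are examples:

    # Only "cpu" and "memory" are supported. Per the new description,
    # limits CPU may be '1', '2', or '4' (4 requires at least 2Gi of
    # memory) and requests CPU may be '1' or '2'.
    resources = {
        "limits":   {"cpu": "2", "memory": "2Gi"},
        "requests": {"cpu": "1", "memory": "512Mi"},  # (Optional)
    }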
+optional", - "type": "string" - }, - "type": { - "description": "Type is a SELinux type label that applies to the container. +optional", - "type": "string" - }, - "user": { - "description": "User is a SELinux user label that applies to the container. +optional", - "type": "string" - } - }, - "type": "object" - }, "SecretEnvSource": { - "description": "SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables.", + "description": "Not supported by Cloud Run SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables.", "id": "SecretEnvSource", "properties": { "localObjectReference": { @@ -1232,22 +1083,22 @@ "description": "This field should not be used directly as it is meant to be inlined directly into the message. Use the \"name\" field instead." }, "name": { - "description": "Cloud Run fully managed: not supported Cloud Run for Anthos: supported The Secret to select from.", + "description": "The Secret to select from.", "type": "string" }, "optional": { - "description": "Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the Secret must be defined +optional", + "description": "(Optional) Specify whether the Secret must be defined", "type": "boolean" } }, "type": "object" }, "SecretKeySelector": { - "description": "Cloud Run fully managed: supported Cloud Run on GKE: supported SecretKeySelector selects a key of a Secret.", + "description": "SecretKeySelector selects a key of a Secret.", "id": "SecretKeySelector", "properties": { "key": { - "description": "Cloud Run fully managed: supported A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. Cloud Run for Anthos: supported The key of the secret to select from. Must be a valid secret key.", + "description": "A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. The key of the secret to select from. Must be a valid secret key.", "type": "string" }, "localObjectReference": { @@ -1255,101 +1106,73 @@ "description": "This field should not be used directly as it is meant to be inlined directly into the message. Use the \"name\" field instead." }, "name": { - "description": "Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported The name of the secret in the pod's namespace to select from.", + "description": "The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. 
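The SecretKeySelector hunk above merges the per-platform support notes into one description: "key" is a Secret Manager version and "name" is the secret, with cross-project secrets requiring an alias on the run.googleapis.com/secrets annotation (the alias syntax, with its placeholders elided, is given in the description itself). A sketch with a hypothetical same-project secret:

    selector = {
        "name": "my-secret",  # secret name in Cloud Secret Manager
        "key": "latest",      # 'latest' or an integer version
        "optional": False,    # (Optional) must the key be defined?
    }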
The name of the secret in the pod's namespace to select from.", "type": "string" }, "optional": { - "description": "Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the Secret or its key must be defined +optional", + "description": "(Optional) Specify whether the Secret or its key must be defined", "type": "boolean" } }, "type": "object" }, "SecretVolumeSource": { - "description": "The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names.", + "description": "The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret_name. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names.", "id": "SecretVolumeSource", "properties": { "defaultMode": { - "description": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "description": "(Optional) Mode bits to use on created files by default. Must be a value between 0000 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. NOTE: This is an integer representation of the mode bits. So, the integer value should look exactly as the chmod numeric notation, i.e. Unix chmod \"777\" (a=rwx) should have the integer value 777.", "format": "int32", "type": "integer" }, "items": { - "description": "Cloud Run fully managed: supported If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. Cloud Run for Anthos: supported If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional.", + "description": "(Optional) If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional.", "items": { "$ref": "KeyToPath" }, "type": "array" }, "optional": { - "description": "Specify whether the Secret or its keys must be defined.", + "description": "(Optional) Specify whether the Secret or its keys must be defined.", "type": "boolean" }, "secretName": { - "description": "Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported Name of the secret in the container's namespace to use.", + "description": "The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Name of the secret in the container's namespace to use.", "type": "string" } }, "type": "object" }, "SecurityContext": { - "description": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", + "description": "Not supported by Cloud Run SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", "id": "SecurityContext", "properties": { - "allowPrivilegeEscalation": { - "description": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN +optional", - "type": "boolean" - }, - "capabilities": { - "$ref": "Capabilities", - "description": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. +optional" - }, - "privileged": { - "description": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. +optional", - "type": "boolean" - }, - "readOnlyRootFilesystem": { - "description": "Whether this container has a read-only root filesystem. Default is false. +optional", - "type": "boolean" - }, - "runAsGroup": { - "description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional", - "format": "int32", - "type": "integer" - }, - "runAsNonRoot": { - "description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. 
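The defaultMode note added above is easy to misread, so it is worth spelling out: the int32 carries the chmod digits literally, meaning rw-r--r-- is the integer 644, not Python's octal literal 0o644 (which is 420 in decimal). A sketch with a hypothetical secret:

    secret_volume = {
        "secretName": "my-secret",
        "defaultMode": 644,  # chmod "644"; NOT 0o644 == 420
        "items": [{"key": "latest", "path": "config.json"}],
    }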
If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional", - "type": "boolean" - }, "runAsUser": { - "description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional", + "description": "(Optional) The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "format": "int32", "type": "integer" - }, - "seLinuxOptions": { - "$ref": "SELinuxOptions", - "description": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional" } }, "type": "object" }, "TCPSocketAction": { - "description": "TCPSocketAction describes an action based on opening a socket", + "description": "Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket", "id": "TCPSocketAction", "properties": { "host": { - "description": "Optional: Host name to connect to, defaults to the pod IP. +optional", + "description": "(Optional) Optional: Host name to connect to, defaults to the pod IP.", "type": "string" }, "port": { - "$ref": "IntOrString", - "description": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + "description": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type.", + "format": "int32", + "type": "integer" } }, "type": "object" }, "Volume": { - "description": "Volume represents a named volume in a container.", + "description": "Not supported by Cloud Run Volume represents a named volume in a container.", "id": "Volume", "properties": { "configMap": { @@ -1365,43 +1188,24 @@ }, "type": "object" }, - "VolumeDevice": { - "description": "volumeDevice describes a mapping of a raw block device within a container.", - "id": "VolumeDevice", - "properties": { - "devicePath": { - "description": "devicePath is the path inside of the container that the device will be mapped to.", - "type": "string" - }, - "name": { - "description": "name must match the name of a persistentVolumeClaim in the pod", - "type": "string" - } - }, - "type": "object" - }, "VolumeMount": { - "description": "VolumeMount describes a mounting of a Volume within a container.", + "description": "Not supported by Cloud Run VolumeMount describes a mounting of a Volume within a container.", "id": "VolumeMount", "properties": { "mountPath": { "description": "Path within the container at which the volume should be mounted. Must not contain ':'.", "type": "string" }, - "mountPropagation": { - "description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. 
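The TCPSocketAction hunk above changes "port" from the removed IntOrString schema to a plain int32, for the proto-compatibility reason quoted in the description. A sketch of the resulting shape; the address is an example:

    # Named IANA_SVC_NAME ports are no longer expressible here; only
    # numeric ports in the range 1-65535 are accepted.
    tcp_socket_action = {
        "host": "10.0.0.1",  # (Optional) defaults to the pod IP
        "port": 8080,
    }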
When not set, MountPropagationHostToContainer is used. This field is beta in 1.10. +optional", - "type": "string" - }, "name": { "description": "This must match the Name of a Volume.", "type": "string" }, "readOnly": { - "description": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. +optional", + "description": "(Optional) Only true is accepted. Defaults to true.", "type": "boolean" }, "subPath": { - "description": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root). +optional", + "description": "(Optional) Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json b/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json index b2fb86cacae..a27613eafe8 100644 --- a/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json +++ b/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json @@ -210,7 +210,7 @@ } } }, - "revision": "20210618", + "revision": "20210628", "rootUrl": "https://runtimeconfig.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/safebrowsing.v4.json b/googleapiclient/discovery_cache/documents/safebrowsing.v4.json index 27bda3678e9..7092949ecd5 100644 --- a/googleapiclient/discovery_cache/documents/safebrowsing.v4.json +++ b/googleapiclient/discovery_cache/documents/safebrowsing.v4.json @@ -261,7 +261,7 @@ } } }, - "revision": "20210625", + "revision": "20210701", "rootUrl": "https://safebrowsing.googleapis.com/", "schemas": { "GoogleProtobufEmpty": { diff --git a/googleapiclient/discovery_cache/documents/script.v1.json b/googleapiclient/discovery_cache/documents/script.v1.json index 5bb39efef5b..d9dcceaabe5 100644 --- a/googleapiclient/discovery_cache/documents/script.v1.json +++ b/googleapiclient/discovery_cache/documents/script.v1.json @@ -887,7 +887,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://script.googleapis.com/", "schemas": { "Content": { diff --git a/googleapiclient/discovery_cache/documents/searchconsole.v1.json b/googleapiclient/discovery_cache/documents/searchconsole.v1.json index 3abf88b4d5a..c632c1607d4 100644 --- a/googleapiclient/discovery_cache/documents/searchconsole.v1.json +++ b/googleapiclient/discovery_cache/documents/searchconsole.v1.json @@ -373,7 +373,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://searchconsole.googleapis.com/", "schemas": { "ApiDataRow": { diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json index 38562bcb331..109f72422ba 100644 --- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json @@ -542,7 +542,7 @@ } } }, - "revision": "20210625", + "revision": "20210628", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "AddTenantProjectRequest": { diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json index 75d646c680f..eccac0e35bd 100644 --- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json +++ 
b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json @@ -500,7 +500,7 @@ } } }, - "revision": "20210625", + "revision": "20210628", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "Api": { diff --git a/googleapiclient/discovery_cache/documents/servicecontrol.v1.json b/googleapiclient/discovery_cache/documents/servicecontrol.v1.json index 176f6c7484e..2b57cf7e9e9 100644 --- a/googleapiclient/discovery_cache/documents/servicecontrol.v1.json +++ b/googleapiclient/discovery_cache/documents/servicecontrol.v1.json @@ -197,7 +197,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://servicecontrol.googleapis.com/", "schemas": { "AllocateInfo": { diff --git a/googleapiclient/discovery_cache/documents/servicecontrol.v2.json b/googleapiclient/discovery_cache/documents/servicecontrol.v2.json index 3712733d6e8..7a906fcc760 100644 --- a/googleapiclient/discovery_cache/documents/servicecontrol.v2.json +++ b/googleapiclient/discovery_cache/documents/servicecontrol.v2.json @@ -169,7 +169,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://servicecontrol.googleapis.com/", "schemas": { "Api": { diff --git a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json index da4c26bed55..bfbe2e80b33 100644 --- a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json @@ -829,7 +829,7 @@ } } }, - "revision": "20210624", + "revision": "20210625", "rootUrl": "https://servicemanagement.googleapis.com/", "schemas": { "Advice": { diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json index 40c8148d285..c19262a7bdb 100644 --- a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json +++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json @@ -860,7 +860,7 @@ } } }, - "revision": "20210622", + "revision": "20210628", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { "AddDnsRecordSetMetadata": { diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json index 969c33472a2..006e5870beb 100644 --- a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json +++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json @@ -307,7 +307,7 @@ } } }, - "revision": "20210622", + "revision": "20210628", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { "AddDnsRecordSetMetadata": { diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1.json index f32f7afb4c4..ae2ba8b05c6 100644 --- a/googleapiclient/discovery_cache/documents/serviceusage.v1.json +++ b/googleapiclient/discovery_cache/documents/serviceusage.v1.json @@ -426,7 +426,7 @@ } } }, - "revision": "20210625", + "revision": "20210628", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AdminQuotaPolicy": { diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json index eca38dc72fd..38b9c7cea94 100644 --- a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json +++ 
b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json @@ -959,7 +959,7 @@ } } }, - "revision": "20210625", + "revision": "20210628", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AdminQuotaPolicy": { diff --git a/googleapiclient/discovery_cache/documents/sheets.v4.json b/googleapiclient/discovery_cache/documents/sheets.v4.json index 1e8d9d7ed1f..914bfb9d9e4 100644 --- a/googleapiclient/discovery_cache/documents/sheets.v4.json +++ b/googleapiclient/discovery_cache/documents/sheets.v4.json @@ -870,7 +870,7 @@ } } }, - "revision": "20210622", + "revision": "20210630", "rootUrl": "https://sheets.googleapis.com/", "schemas": { "AddBandingRequest": { diff --git a/googleapiclient/discovery_cache/documents/slides.v1.json b/googleapiclient/discovery_cache/documents/slides.v1.json index ae9e01555f8..76bed9da31b 100644 --- a/googleapiclient/discovery_cache/documents/slides.v1.json +++ b/googleapiclient/discovery_cache/documents/slides.v1.json @@ -313,7 +313,7 @@ } } }, - "revision": "20210614", + "revision": "20210622", "rootUrl": "https://slides.googleapis.com/", "schemas": { "AffineTransform": { @@ -3461,6 +3461,10 @@ "description": "The properties of Page that are only relevant for pages with page_type SLIDE.", "id": "SlideProperties", "properties": { + "isSkipped": { + "description": "Whether the slide is skipped in the presentation mode. Defaults to false.", + "type": "boolean" + }, "layoutObjectId": { "description": "The object ID of the layout that this slide is based on. This property is read-only.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/spanner.v1.json b/googleapiclient/discovery_cache/documents/spanner.v1.json index 8c98cb37195..69a1ea62fa3 100644 --- a/googleapiclient/discovery_cache/documents/spanner.v1.json +++ b/googleapiclient/discovery_cache/documents/spanner.v1.json @@ -2037,7 +2037,7 @@ } } }, - "revision": "20210603", + "revision": "20210624", "rootUrl": "https://spanner.googleapis.com/", "schemas": { "Backup": { @@ -2971,6 +2971,11 @@ "format": "int32", "type": "integer" }, + "timeOffset": { + "description": "The time offset. This is the time since the start of the time interval.", + "format": "google-duration", + "type": "string" + }, "unit": { "$ref": "LocalizedString", "description": "The unit of the metric. This is an unstructured field and will be mapped as is to the user." @@ -4283,7 +4288,7 @@ "type": "object" }, "TransactionOptions": { - "description": "# Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. 
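Among the revision bumps above, the slides.v1 document gains SlideProperties.isSkipped. A sketch of toggling it with the generated Python client; the updateSlideProperties request type and field mask follow the Slides batchUpdate conventions and are my assumption, as are the placeholder IDs and credential plumbing:

    from googleapiclient.discovery import build

    slides = build("slides", "v1")  # assumes default credentials
    slides.presentations().batchUpdate(
        presentationId="PRESENTATION_ID",  # placeholder
        body={
            "requests": [{
                "updateSlideProperties": {
                    "objectId": "SLIDE_ID",  # placeholder slide ID
                    "slideProperties": {"isSkipped": True},
                    "fields": "isSkipped",
                }
            }]
        },
    ).execute()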
Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. 
However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a \"negotiation phase\" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. 
In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as \"version GC\". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table.", + "description": "Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. 
Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp.
Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a \"negotiation phase\" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two-phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as \"version GC\". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller-scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement.
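Stepping back briefly to the timestamp bounds above: a single-use read-only transaction selects a bound as in this sketch, reusing the hypothetical `sessions` and `session` from the earlier example (table name hypothetical); the Partitioned DML discussion continues below.

```
# Single-use read-only reads, one per timestamp bound discussed above.
for read_only in (
    {"strong": True},           # strong: sees everything committed before the read
    {"exactStaleness": "10s"},  # exact staleness: user-specified bound
    {"maxStaleness": "15s"},    # bounded staleness: single-use only
):
    resp = sessions.executeSql(
        session=session,
        body={
            "transaction": {"singleUse": {"readOnly": read_only}},
            "sql": "SELECT COUNT(*) FROM Albums",  # hypothetical table
        }).execute()
    print(resp.get("rows"))
```

Note that `maxStaleness` (and `minReadTimestamp`) are valid only in single-use transactions, while `strong`, `readTimestamp`, and `exactStaleness` may also be passed to `beginTransaction`.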
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all.
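The caveats in the list above can be illustrated with a short sketch under the same hypothetical setup:

```
# Partitioned DML: one fully-partitionable, idempotent statement per transaction.
ptxn = sessions.beginTransaction(
    session=session, body={"options": {"partitionedDml": {}}}).execute()
sessions.executeSql(
    session=session,
    body={
        "transaction": {"id": ptxn["id"]},
        "sql": "DELETE FROM Events WHERE EventDate < '2020-01-01'",  # hypothetical
        "seqno": "1",
    }).execute()
# No Commit or Rollback here: partitions commit automatically as they complete.
```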
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table.", "id": "TransactionOptions", "properties": { "partitionedDml": { diff --git a/googleapiclient/discovery_cache/documents/speech.v1.json b/googleapiclient/discovery_cache/documents/speech.v1.json index 02623864a23..1bc2e9e2c34 100644 --- a/googleapiclient/discovery_cache/documents/speech.v1.json +++ b/googleapiclient/discovery_cache/documents/speech.v1.json @@ -212,7 +212,7 @@ } } }, - "revision": "20210617", + "revision": "20210624", "rootUrl": "https://speech.googleapis.com/", "schemas": { "ListOperationsResponse": { diff --git a/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json b/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json index a1a2ae5e444..ff84dfc42f0 100644 --- a/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json +++ b/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json @@ -524,7 +524,7 @@ } } }, - "revision": "20210617", + "revision": "20210624", "rootUrl": "https://speech.googleapis.com/", "schemas": { "ClassItem": { diff --git a/googleapiclient/discovery_cache/documents/speech.v2beta1.json b/googleapiclient/discovery_cache/documents/speech.v2beta1.json index 07573f581c6..c16b64d3de1 100644 --- a/googleapiclient/discovery_cache/documents/speech.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/speech.v2beta1.json @@ -184,7 +184,7 @@ } } }, - "revision": "20210617", + "revision": "20210624", "rootUrl": "https://speech.googleapis.com/", "schemas": { "ListOperationsResponse": { diff --git a/googleapiclient/discovery_cache/documents/storage.v1.json b/googleapiclient/discovery_cache/documents/storage.v1.json index a86a117d01e..b270c2bcf9b 100644 --- a/googleapiclient/discovery_cache/documents/storage.v1.json +++ b/googleapiclient/discovery_cache/documents/storage.v1.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"31393831373135393636393931363632343834\"", + "etag": "\"3136333132313732313530383531303738323330\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" }, @@ -3230,7 +3230,7 @@ } } }, - "revision": "20210622", + "revision": "20210630", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { diff --git a/googleapiclient/discovery_cache/documents/storagetransfer.v1.json b/googleapiclient/discovery_cache/documents/storagetransfer.v1.json index e62f427af55..1f145b4b01d 100644 --- a/googleapiclient/discovery_cache/documents/storagetransfer.v1.json +++ b/googleapiclient/discovery_cache/documents/storagetransfer.v1.json @@ -434,7 +434,7 @@ } } }, - "revision": "20210617", + "revision": "20210624", "rootUrl": "https://storagetransfer.googleapis.com/", "schemas": { "AwsAccessKey": { @@ -467,6 +467,10 @@ "path": { "description": "Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'.", "type": "string" + }, + "roleArn": { + "description": "Input only. Role ARN to support temporary credentials via AssumeRoleWithWebIdentity.
When a role ARN is provided, the transfer service fetches temporary credentials for the session by calling AssumeRoleWithWebIdentity for the provided role, using the [GoogleServiceAccount] for this project.", + "type": "string" } }, "type": "object" @@ -499,7 +503,7 @@ "id": "AzureCredentials", "properties": { "sasToken": { - "description": "Required. Azure shared access signature. (see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview)).", + "description": "Required. Azure shared access signature (SAS). *Note:* Copying data from Azure Data Lake Storage (ADLS) Gen 2 is in [Preview](/products/#product-launch-stages). During Preview, if you are copying data from ADLS Gen 2, you must use an account SAS. For more information about SAS, see [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview).", "type": "string" } }, @@ -640,6 +644,10 @@ "accountEmail": { "description": "Email address of the service account.", "type": "string" + }, + "subjectId": { + "description": "Unique identifier for the service account.", + "type": "string" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json b/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json index 8d5597ae67a..b519c46c329 100644 --- a/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json +++ b/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json @@ -375,7 +375,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://streetviewpublish.googleapis.com/", "schemas": { "BatchDeletePhotosRequest": { diff --git a/googleapiclient/discovery_cache/documents/sts.v1.json b/googleapiclient/discovery_cache/documents/sts.v1.json index 605346f3344..c36bdd4048f 100644 --- a/googleapiclient/discovery_cache/documents/sts.v1.json +++ b/googleapiclient/discovery_cache/documents/sts.v1.json @@ -131,7 +131,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://sts.googleapis.com/", "schemas": { "GoogleIdentityStsV1ExchangeTokenRequest": { @@ -159,11 +159,11 @@ "type": "string" }, "subjectToken": { - "description": "Required. The input token. This token is either an external credential issued by a workload identity pool provider, or a short-lived access token issued by Google. If the token is an OIDC JWT, it must use the JWT format defined in [RFC 7523](https://tools.ietf.org/html/rfc7523), and the `subject_token_type` must be `urn:ietf:params:oauth:token-type:jwt`. The following headers are required: - `kid`: The identifier of the signing key securing the JWT. - `alg`: The cryptographic algorithm securing the JWT. Must be `RS256` or `ES256`. The following payload fields are required. For more information, see [RFC 7523, Section 3](https://tools.ietf.org/html/rfc7523#section-3): - `iss`: The issuer of the token. The issuer must provide a discovery document at the URL `/.well-known/openid-configuration`, where `` is the value of this field. The document must be formatted according to section 4.2 of the [OIDC 1.0 Discovery specification](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse). - `iat`: The issue time, in seconds, since the Unix epoch. Must be in the past. - `exp`: The expiration time, in seconds, since the Unix epoch. Must be less than 48 hours after `iat`.
Shorter expiration times are more secure. If possible, we recommend setting an expiration time less than 6 hours. - `sub`: The identity asserted in the JWT. - `aud`: For workload identity pools, this must be a value specified in the allowed audiences for the workload identity pool provider, or one of the audiences allowed by default if no audiences were specified. See https://cloud.google.com/iam/docs/reference/rest/v1/projects.locations.workloadIdentityPools.providers#oidc Example header: ``` { \"alg\": \"RS256\", \"kid\": \"us-east-11\" } ``` Example payload: ``` { \"iss\": \"https://accounts.google.com\", \"iat\": 1517963104, \"exp\": 1517966704, \"aud\": \"//iam.googleapis.com/projects/1234567890123/locations/global/workloadIdentityPools/my-pool/providers/my-provider\", \"sub\": \"113475438248934895348\", \"my_claims\": { \"additional_claim\": \"value\" } } ``` If `subject_token` is for AWS, it must be a serialized `GetCallerIdentity` token. This token contains the same information as a request to the AWS [`GetCallerIdentity()`](https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity) method, as well as the AWS [signature](https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) for the request information. Use Signature Version 4. Format the request as URL-encoded JSON, and set the `subject_token_type` parameter to `urn:ietf:params:aws:token-type:aws4_request`. The following parameters are required: - `url`: The URL of the AWS STS endpoint for `GetCallerIdentity()`, such as `https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15`. Regional endpoints are also supported. - `method`: The HTTP request method: `POST`. - `headers`: The HTTP request headers, which must include: - `Authorization`: The request signature. - `x-amz-date`: The time you will send the request, formatted as an [ISO8601 Basic](https://docs.aws.amazon.com/general/latest/gr/sigv4_elements.html#sigv4_elements_date) string. This value is typically set to the current time and is used to help prevent replay attacks. - `host`: The hostname of the `url` field; for example, `sts.amazonaws.com`. - `x-goog-cloud-target-resource`: The full, canonical resource name of the workload identity pool provider, with or without an `https:` prefix. To help ensure data integrity, we recommend including this header in the `SignedHeaders` field of the signed request. For example: //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ If you are using temporary security credentials provided by AWS, you must also include the header `x-amz-security-token`, with the value set to the session token. The following example shows a `GetCallerIdentity` token: ``` { \"headers\": [ {\"key\": \"x-amz-date\", \"value\": \"20200815T015049Z\"}, {\"key\": \"Authorization\", \"value\": \"AWS4-HMAC-SHA256+Credential=$credential,+SignedHeaders=host;x-amz-date;x-goog-cloud-target-resource,+Signature=$signature\"}, {\"key\": \"x-goog-cloud-target-resource\", \"value\": \"//iam.googleapis.com/projects//locations//workloadIdentityPools//providers/\"}, {\"key\": \"host\", \"value\": \"sts.amazonaws.com\"} . ], \"method\": \"POST\", \"url\": \"https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15\" } ``` You can also use a Google-issued OAuth 2.0 access token with this field to obtain an access token with new security attributes applied, such as a Credential Access Boundary. 
In this case, set `subject_token_type` to `urn:ietf:params:oauth:token-type:access_token`. If an access token already contains security attributes, you cannot apply additional security attributes.", + "description": "Required. The input token. This token is either an external credential issued by a workload identity pool provider, or a short-lived access token issued by Google. If the token is an OIDC JWT, it must use the JWT format defined in [RFC 7523](https://tools.ietf.org/html/rfc7523), and the `subject_token_type` must be either `urn:ietf:params:oauth:token-type:jwt` or `urn:ietf:params:oauth:token-type:id_token`. The following headers are required: - `kid`: The identifier of the signing key securing the JWT. - `alg`: The cryptographic algorithm securing the JWT. Must be `RS256` or `ES256`. The following payload fields are required. For more information, see [RFC 7523, Section 3](https://tools.ietf.org/html/rfc7523#section-3): - `iss`: The issuer of the token. The issuer must provide a discovery document at the URL `/.well-known/openid-configuration`, where `` is the value of this field. The document must be formatted according to section 4.2 of the [OIDC 1.0 Discovery specification](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse). - `iat`: The issue time, in seconds, since the Unix epoch. Must be in the past. - `exp`: The expiration time, in seconds, since the Unix epoch. Must be less than 48 hours after `iat`. Shorter expiration times are more secure. If possible, we recommend setting an expiration time less than 6 hours. - `sub`: The identity asserted in the JWT. - `aud`: For workload identity pools, this must be a value specified in the allowed audiences for the workload identity pool provider, or one of the audiences allowed by default if no audiences were specified. See https://cloud.google.com/iam/docs/reference/rest/v1/projects.locations.workloadIdentityPools.providers#oidc Example header: ``` { \"alg\": \"RS256\", \"kid\": \"us-east-11\" } ``` Example payload: ``` { \"iss\": \"https://accounts.google.com\", \"iat\": 1517963104, \"exp\": 1517966704, \"aud\": \"//iam.googleapis.com/projects/1234567890123/locations/global/workloadIdentityPools/my-pool/providers/my-provider\", \"sub\": \"113475438248934895348\", \"my_claims\": { \"additional_claim\": \"value\" } } ``` If `subject_token` is for AWS, it must be a serialized `GetCallerIdentity` token. This token contains the same information as a request to the AWS [`GetCallerIdentity()`](https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity) method, as well as the AWS [signature](https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) for the request information. Use Signature Version 4. Format the request as URL-encoded JSON, and set the `subject_token_type` parameter to `urn:ietf:params:aws:token-type:aws4_request`. The following parameters are required: - `url`: The URL of the AWS STS endpoint for `GetCallerIdentity()`, such as `https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15`. Regional endpoints are also supported. - `method`: The HTTP request method: `POST`. - `headers`: The HTTP request headers, which must include: - `Authorization`: The request signature. - `x-amz-date`: The time you will send the request, formatted as an [ISO8601 Basic](https://docs.aws.amazon.com/general/latest/gr/sigv4_elements.html#sigv4_elements_date) string. This value is typically set to the current time and is used to help prevent replay attacks. 
- `host`: The hostname of the `url` field; for example, `sts.amazonaws.com`. - `x-goog-cloud-target-resource`: The full, canonical resource name of the workload identity pool provider, with or without an `https:` prefix. To help ensure data integrity, we recommend including this header in the `SignedHeaders` field of the signed request. For example: //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ If you are using temporary security credentials provided by AWS, you must also include the header `x-amz-security-token`, with the value set to the session token. The following example shows a `GetCallerIdentity` token: ``` { \"headers\": [ {\"key\": \"x-amz-date\", \"value\": \"20200815T015049Z\"}, {\"key\": \"Authorization\", \"value\": \"AWS4-HMAC-SHA256+Credential=$credential,+SignedHeaders=host;x-amz-date;x-goog-cloud-target-resource,+Signature=$signature\"}, {\"key\": \"x-goog-cloud-target-resource\", \"value\": \"//iam.googleapis.com/projects//locations//workloadIdentityPools//providers/\"}, {\"key\": \"host\", \"value\": \"sts.amazonaws.com\"} ], \"method\": \"POST\", \"url\": \"https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15\" } ``` You can also use a Google-issued OAuth 2.0 access token with this field to obtain an access token with new security attributes applied, such as a Credential Access Boundary. In this case, set `subject_token_type` to `urn:ietf:params:oauth:token-type:access_token`. If an access token already contains security attributes, you cannot apply additional security attributes.", "type": "string" }, "subjectTokenType": { - "description": "Required. An identifier that indicates the type of the security token in the `subject_token` parameter. Supported values are `urn:ietf:params:oauth:token-type:jwt`, `urn:ietf:params:aws:token-type:aws4_request`, and `urn:ietf:params:oauth:token-type:access_token`.", + "description": "Required. An identifier that indicates the type of the security token in the `subject_token` parameter. Supported values are `urn:ietf:params:oauth:token-type:jwt`, `urn:ietf:params:oauth:token-type:id_token`, `urn:ietf:params:aws:token-type:aws4_request`, and `urn:ietf:params:oauth:token-type:access_token`.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/sts.v1beta.json b/googleapiclient/discovery_cache/documents/sts.v1beta.json index f01e5e2a4ef..547681a0fd0 100644 --- a/googleapiclient/discovery_cache/documents/sts.v1beta.json +++ b/googleapiclient/discovery_cache/documents/sts.v1beta.json @@ -116,7 +116,7 @@ } } }, - "revision": "20210618", + "revision": "20210625", "rootUrl": "https://sts.googleapis.com/", "schemas": { "GoogleIdentityStsV1betaExchangeTokenRequest": { @@ -144,11 +144,11 @@ "type": "string" }, "subjectToken": { - "description": "Required. The input token. This token is either an external credential issued by a workload identity pool provider, or a short-lived access token issued by Google. If the token is an OIDC JWT, it must use the JWT format defined in [RFC 7523](https://tools.ietf.org/html/rfc7523), and the `subject_token_type` must be `urn:ietf:params:oauth:token-type:jwt`. The following headers are required: - `kid`: The identifier of the signing key securing the JWT. - `alg`: The cryptographic algorithm securing the JWT. Must be `RS256` or `ES256`. The following payload fields are required.
For more information, see [RFC 7523, Section 3](https://tools.ietf.org/html/rfc7523#section-3): - `iss`: The issuer of the token. The issuer must provide a discovery document at the URL `/.well-known/openid-configuration`, where `` is the value of this field. The document must be formatted according to section 4.2 of the [OIDC 1.0 Discovery specification](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse). - `iat`: The issue time, in seconds, since the Unix epoch. Must be in the past. - `exp`: The expiration time, in seconds, since the Unix epoch. Must be less than 48 hours after `iat`. Shorter expiration times are more secure. If possible, we recommend setting an expiration time less than 6 hours. - `sub`: The identity asserted in the JWT. - `aud`: For workload identity pools, this must be a value specified in the allowed audiences for the workload identity pool provider, or one of the audiences allowed by default if no audiences were specified. See https://cloud.google.com/iam/docs/reference/rest/v1/projects.locations.workloadIdentityPools.providers#oidc Example header: ``` { \"alg\": \"RS256\", \"kid\": \"us-east-11\" } ``` Example payload: ``` { \"iss\": \"https://accounts.google.com\", \"iat\": 1517963104, \"exp\": 1517966704, \"aud\": \"//iam.googleapis.com/projects/1234567890123/locations/global/workloadIdentityPools/my-pool/providers/my-provider\", \"sub\": \"113475438248934895348\", \"my_claims\": { \"additional_claim\": \"value\" } } ``` If `subject_token` is for AWS, it must be a serialized `GetCallerIdentity` token. This token contains the same information as a request to the AWS [`GetCallerIdentity()`](https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity) method, as well as the AWS [signature](https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) for the request information. Use Signature Version 4. Format the request as URL-encoded JSON, and set the `subject_token_type` parameter to `urn:ietf:params:aws:token-type:aws4_request`. The following parameters are required: - `url`: The URL of the AWS STS endpoint for `GetCallerIdentity()`, such as `https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15`. Regional endpoints are also supported. - `method`: The HTTP request method: `POST`. - `headers`: The HTTP request headers, which must include: - `Authorization`: The request signature. - `x-amz-date`: The time you will send the request, formatted as an [ISO8601 Basic](https://docs.aws.amazon.com/general/latest/gr/sigv4_elements.html#sigv4_elements_date) string. This value is typically set to the current time and is used to help prevent replay attacks. - `host`: The hostname of the `url` field; for example, `sts.amazonaws.com`. - `x-goog-cloud-target-resource`: The full, canonical resource name of the workload identity pool provider, with or without an `https:` prefix. To help ensure data integrity, we recommend including this header in the `SignedHeaders` field of the signed request. For example: //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ If you are using temporary security credentials provided by AWS, you must also include the header `x-amz-security-token`, with the value set to the session token. 
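As an illustrative aside, assembling the URL-encoded JSON `subject_token` this description calls for might look like the following Python sketch (the signature value and provider path are placeholders; the description's own example token follows below).

```
import json
import urllib.parse

# Serialized, signed GetCallerIdentity request (placeholder values).
caller_identity = {
    "url": "https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
    "method": "POST",
    "headers": [
        {"key": "x-amz-date", "value": "20200815T015049Z"},
        {"key": "Authorization", "value": "AWS4-HMAC-SHA256 Credential=..., "
                                          "SignedHeaders=host;x-amz-date;x-goog-cloud-target-resource, "
                                          "Signature=..."},  # SigV4 signature (placeholder)
        {"key": "x-goog-cloud-target-resource", "value": "//iam.googleapis.com/..."},  # provider path elided
        {"key": "host", "value": "sts.amazonaws.com"},
    ],
}
subject_token = urllib.parse.quote(json.dumps(caller_identity))
# Send with subjectTokenType urn:ietf:params:aws:token-type:aws4_request.
```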
The following example shows a `GetCallerIdentity` token: ``` { \"headers\": [ {\"key\": \"x-amz-date\", \"value\": \"20200815T015049Z\"}, {\"key\": \"Authorization\", \"value\": \"AWS4-HMAC-SHA256+Credential=$credential,+SignedHeaders=host;x-amz-date;x-goog-cloud-target-resource,+Signature=$signature\"}, {\"key\": \"x-goog-cloud-target-resource\", \"value\": \"//iam.googleapis.com/projects//locations//workloadIdentityPools//providers/\"}, {\"key\": \"host\", \"value\": \"sts.amazonaws.com\"} . ], \"method\": \"POST\", \"url\": \"https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15\" } ``` You can also use a Google-issued OAuth 2.0 access token with this field to obtain an access token with new security attributes applied, such as a Credential Access Boundary. In this case, set `subject_token_type` to `urn:ietf:params:oauth:token-type:access_token`. If an access token already contains security attributes, you cannot apply additional security attributes.", + "description": "Required. The input token. This token is either an external credential issued by a workload identity pool provider, or a short-lived access token issued by Google. If the token is an OIDC JWT, it must use the JWT format defined in [RFC 7523](https://tools.ietf.org/html/rfc7523), and the `subject_token_type` must be either `urn:ietf:params:oauth:token-type:jwt` or `urn:ietf:params:oauth:token-type:id_token`. The following headers are required: - `kid`: The identifier of the signing key securing the JWT. - `alg`: The cryptographic algorithm securing the JWT. Must be `RS256` or `ES256`. The following payload fields are required. For more information, see [RFC 7523, Section 3](https://tools.ietf.org/html/rfc7523#section-3): - `iss`: The issuer of the token. The issuer must provide a discovery document at the URL `/.well-known/openid-configuration`, where `` is the value of this field. The document must be formatted according to section 4.2 of the [OIDC 1.0 Discovery specification](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse). - `iat`: The issue time, in seconds, since the Unix epoch. Must be in the past. - `exp`: The expiration time, in seconds, since the Unix epoch. Must be less than 48 hours after `iat`. Shorter expiration times are more secure. If possible, we recommend setting an expiration time less than 6 hours. - `sub`: The identity asserted in the JWT. - `aud`: For workload identity pools, this must be a value specified in the allowed audiences for the workload identity pool provider, or one of the audiences allowed by default if no audiences were specified. See https://cloud.google.com/iam/docs/reference/rest/v1/projects.locations.workloadIdentityPools.providers#oidc Example header: ``` { \"alg\": \"RS256\", \"kid\": \"us-east-11\" } ``` Example payload: ``` { \"iss\": \"https://accounts.google.com\", \"iat\": 1517963104, \"exp\": 1517966704, \"aud\": \"//iam.googleapis.com/projects/1234567890123/locations/global/workloadIdentityPools/my-pool/providers/my-provider\", \"sub\": \"113475438248934895348\", \"my_claims\": { \"additional_claim\": \"value\" } } ``` If `subject_token` is for AWS, it must be a serialized `GetCallerIdentity` token. This token contains the same information as a request to the AWS [`GetCallerIdentity()`](https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity) method, as well as the AWS [signature](https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) for the request information. 
Use Signature Version 4. Format the request as URL-encoded JSON, and set the `subject_token_type` parameter to `urn:ietf:params:aws:token-type:aws4_request`. The following parameters are required: - `url`: The URL of the AWS STS endpoint for `GetCallerIdentity()`, such as `https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15`. Regional endpoints are also supported. - `method`: The HTTP request method: `POST`. - `headers`: The HTTP request headers, which must include: - `Authorization`: The request signature. - `x-amz-date`: The time you will send the request, formatted as an [ISO8601 Basic](https://docs.aws.amazon.com/general/latest/gr/sigv4_elements.html#sigv4_elements_date) string. This value is typically set to the current time and is used to help prevent replay attacks. - `host`: The hostname of the `url` field; for example, `sts.amazonaws.com`. - `x-goog-cloud-target-resource`: The full, canonical resource name of the workload identity pool provider, with or without an `https:` prefix. To help ensure data integrity, we recommend including this header in the `SignedHeaders` field of the signed request. For example: //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ If you are using temporary security credentials provided by AWS, you must also include the header `x-amz-security-token`, with the value set to the session token. The following example shows a `GetCallerIdentity` token: ``` { \"headers\": [ {\"key\": \"x-amz-date\", \"value\": \"20200815T015049Z\"}, {\"key\": \"Authorization\", \"value\": \"AWS4-HMAC-SHA256+Credential=$credential,+SignedHeaders=host;x-amz-date;x-goog-cloud-target-resource,+Signature=$signature\"}, {\"key\": \"x-goog-cloud-target-resource\", \"value\": \"//iam.googleapis.com/projects//locations//workloadIdentityPools//providers/\"}, {\"key\": \"host\", \"value\": \"sts.amazonaws.com\"} ], \"method\": \"POST\", \"url\": \"https://sts.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15\" } ``` You can also use a Google-issued OAuth 2.0 access token with this field to obtain an access token with new security attributes applied, such as a Credential Access Boundary. In this case, set `subject_token_type` to `urn:ietf:params:oauth:token-type:access_token`. If an access token already contains security attributes, you cannot apply additional security attributes.", "type": "string" }, "subjectTokenType": { - "description": "Required. An identifier that indicates the type of the security token in the `subject_token` parameter. Supported values are `urn:ietf:params:oauth:token-type:jwt`, `urn:ietf:params:aws:token-type:aws4_request`, and `urn:ietf:params:oauth:token-type:access_token`.", + "description": "Required. An identifier that indicates the type of the security token in the `subject_token` parameter.
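Putting these request fields together, a hedged sketch of the exchange call through this library's discovery client follows (shown for v1; the v1beta call is analogous). The pool, provider, and token values are placeholders, and client construction assumes ambient default credentials even though the exchange itself authenticates via the subject token.

```
from googleapiclient.discovery import build

external_jwt = "eyJhbGciOi..."  # OIDC JWT from the external identity provider (placeholder)

sts = build("sts", "v1")
resp = sts.v1().token(body={
    "grantType": "urn:ietf:params:oauth:grant-type:token-exchange",
    "audience": ("//iam.googleapis.com/projects/1234567890123/locations/global/"
                 "workloadIdentityPools/my-pool/providers/my-provider"),  # hypothetical
    "requestedTokenType": "urn:ietf:params:oauth:token-type:access_token",
    "scope": "https://www.googleapis.com/auth/cloud-platform",
    "subjectToken": external_jwt,
    "subjectTokenType": "urn:ietf:params:oauth:token-type:jwt",
}).execute()
short_lived_token = resp.get("access_token")  # response uses OAuth-style snake_case fields
```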
Supported values are `urn:ietf:params:oauth:token-type:jwt`, `urn:ietf:params:oauth:token-type:id_token`, `urn:ietf:params:aws:token-type:aws4_request`, and `urn:ietf:params:oauth:token-type:access_token`.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/tagmanager.v1.json b/googleapiclient/discovery_cache/documents/tagmanager.v1.json index d8d3a23ad2a..6c5cd226206 100644 --- a/googleapiclient/discovery_cache/documents/tagmanager.v1.json +++ b/googleapiclient/discovery_cache/documents/tagmanager.v1.json @@ -1932,7 +1932,7 @@ } } }, - "revision": "20210625", + "revision": "20210629", "rootUrl": "https://tagmanager.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/tagmanager.v2.json b/googleapiclient/discovery_cache/documents/tagmanager.v2.json index 40ed80456ed..ed987ec7b27 100644 --- a/googleapiclient/discovery_cache/documents/tagmanager.v2.json +++ b/googleapiclient/discovery_cache/documents/tagmanager.v2.json @@ -3125,7 +3125,7 @@ } } }, - "revision": "20210625", + "revision": "20210629", "rootUrl": "https://tagmanager.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/tasks.v1.json b/googleapiclient/discovery_cache/documents/tasks.v1.json index e70e26f54a6..aea1f01aa20 100644 --- a/googleapiclient/discovery_cache/documents/tasks.v1.json +++ b/googleapiclient/discovery_cache/documents/tasks.v1.json @@ -566,7 +566,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://tasks.googleapis.com/", "schemas": { "Task": { diff --git a/googleapiclient/discovery_cache/documents/testing.v1.json b/googleapiclient/discovery_cache/documents/testing.v1.json index b55dc88f97e..3845a92403d 100644 --- a/googleapiclient/discovery_cache/documents/testing.v1.json +++ b/googleapiclient/discovery_cache/documents/testing.v1.json @@ -282,7 +282,7 @@ } } }, - "revision": "20210621", + "revision": "20210625", "rootUrl": "https://testing.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json b/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json index 35b5f03fda8..4e9bc69f994 100644 --- a/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json +++ b/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json @@ -1463,7 +1463,7 @@ } } }, - "revision": "20210628", + "revision": "20210629", "rootUrl": "https://toolresults.googleapis.com/", "schemas": { "ANR": { diff --git a/googleapiclient/discovery_cache/documents/trafficdirector.v2.json b/googleapiclient/discovery_cache/documents/trafficdirector.v2.json index 28bedf0051e..c528001b0ca 100644 --- a/googleapiclient/discovery_cache/documents/trafficdirector.v2.json +++ b/googleapiclient/discovery_cache/documents/trafficdirector.v2.json @@ -128,7 +128,7 @@ } } }, - "revision": "20210615", + "revision": "20210623", "rootUrl": "https://trafficdirector.googleapis.com/", "schemas": { "Address": { diff --git a/googleapiclient/discovery_cache/documents/transcoder.v1beta1.json b/googleapiclient/discovery_cache/documents/transcoder.v1beta1.json index 7b5e81cadf2..7e4ce5dcec9 100644 --- a/googleapiclient/discovery_cache/documents/transcoder.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/transcoder.v1beta1.json @@ -355,7 +355,7 @@ } } }, - "revision": "20210624", + "revision": "20210702", "rootUrl": "https://transcoder.googleapis.com/", "schemas": { "AdBreak": { diff --git 
a/googleapiclient/discovery_cache/documents/vectortile.v1.json b/googleapiclient/discovery_cache/documents/vectortile.v1.json index 91b494fe457..9dce66c601f 100644 --- a/googleapiclient/discovery_cache/documents/vectortile.v1.json +++ b/googleapiclient/discovery_cache/documents/vectortile.v1.json @@ -343,7 +343,7 @@ } } }, - "revision": "20210625", + "revision": "20210703", "rootUrl": "https://vectortile.googleapis.com/", "schemas": { "Area": { diff --git a/googleapiclient/discovery_cache/documents/vision.v1.json b/googleapiclient/discovery_cache/documents/vision.v1.json index 9b5b2eb1ff9..30cdd3737f8 100644 --- a/googleapiclient/discovery_cache/documents/vision.v1.json +++ b/googleapiclient/discovery_cache/documents/vision.v1.json @@ -1282,7 +1282,7 @@ } } }, - "revision": "20210618", + "revision": "20210626", "rootUrl": "https://vision.googleapis.com/", "schemas": { "AddProductToProductSetRequest": { diff --git a/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json b/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json index e472d9ec206..01047067e3d 100644 --- a/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json +++ b/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json @@ -449,7 +449,7 @@ } } }, - "revision": "20210618", + "revision": "20210626", "rootUrl": "https://vision.googleapis.com/", "schemas": { "AnnotateFileResponse": { diff --git a/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json b/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json index b2a5de494c3..49244163b6d 100644 --- a/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json +++ b/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json @@ -449,7 +449,7 @@ } } }, - "revision": "20210618", + "revision": "20210626", "rootUrl": "https://vision.googleapis.com/", "schemas": { "AnnotateFileResponse": { diff --git a/googleapiclient/discovery_cache/documents/webfonts.v1.json b/googleapiclient/discovery_cache/documents/webfonts.v1.json index f268f970050..e0bd4fcc286 100644 --- a/googleapiclient/discovery_cache/documents/webfonts.v1.json +++ b/googleapiclient/discovery_cache/documents/webfonts.v1.json @@ -134,7 +134,7 @@ } } }, - "revision": "20210424", + "revision": "20210629", "rootUrl": "https://webfonts.googleapis.com/", "schemas": { "Webfont": { diff --git a/googleapiclient/discovery_cache/documents/workflows.v1beta.json b/googleapiclient/discovery_cache/documents/workflows.v1beta.json index 8cee3ec3038..180007745c0 100644 --- a/googleapiclient/discovery_cache/documents/workflows.v1beta.json +++ b/googleapiclient/discovery_cache/documents/workflows.v1beta.json @@ -444,7 +444,7 @@ } } }, - "revision": "20210616", + "revision": "20210624", "rootUrl": "https://workflows.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/youtube.v3.json b/googleapiclient/discovery_cache/documents/youtube.v3.json index 937b8cdba4b..4fa65aaedcc 100644 --- a/googleapiclient/discovery_cache/documents/youtube.v3.json +++ b/googleapiclient/discovery_cache/documents/youtube.v3.json @@ -3766,7 +3766,7 @@ } } }, - "revision": "20210625", + "revision": "20210630", "rootUrl": "https://youtube.googleapis.com/", "schemas": { "AbuseReport": { diff --git a/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json b/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json index 9abf4f13761..c1f7c978ffc 100644 --- a/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json +++ 
b/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json @@ -421,7 +421,7 @@ } } }, - "revision": "20210625", + "revision": "20210630", "rootUrl": "https://youtubeanalytics.googleapis.com/", "schemas": { "EmptyResponse": { diff --git a/googleapiclient/discovery_cache/documents/youtubereporting.v1.json b/googleapiclient/discovery_cache/documents/youtubereporting.v1.json index f75d1d8faea..2800f69c0a3 100644 --- a/googleapiclient/discovery_cache/documents/youtubereporting.v1.json +++ b/googleapiclient/discovery_cache/documents/youtubereporting.v1.json @@ -411,7 +411,7 @@ } } }, - "revision": "20210626", + "revision": "20210630", "rootUrl": "https://youtubereporting.googleapis.com/", "schemas": { "Empty": {