diff --git a/docs/dyn/adexchangebuyer2_v2beta1.accounts.clients.html b/docs/dyn/adexchangebuyer2_v2beta1.accounts.clients.html index ce7c6f4d7df..dbc8460c7b9 100644 --- a/docs/dyn/adexchangebuyer2_v2beta1.accounts.clients.html +++ b/docs/dyn/adexchangebuyer2_v2beta1.accounts.clients.html @@ -119,7 +119,7 @@

Method Details

{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified. "clientAccountId": "A String", # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations. - "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. + "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed. "entityId": "A String", # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file. "entityName": "A String", # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations. "entityType": "A String", # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`. @@ -139,7 +139,7 @@

Method Details

{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified. "clientAccountId": "A String", # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations. - "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. + "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed. "entityId": "A String", # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file. "entityName": "A String", # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations. "entityType": "A String", # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`. @@ -167,7 +167,7 @@

Method Details

{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified. "clientAccountId": "A String", # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations. - "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. + "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed. "entityId": "A String", # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file. "entityName": "A String", # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations. "entityType": "A String", # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`. @@ -199,7 +199,7 @@

Method Details

"clients": [ # The returned list of clients. { # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified. "clientAccountId": "A String", # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations. - "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. + "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed. "entityId": "A String", # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file. "entityName": "A String", # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations. "entityType": "A String", # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`. @@ -239,7 +239,7 @@

Method Details

{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified. "clientAccountId": "A String", # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations. - "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. + "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed. "entityId": "A String", # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file. "entityName": "A String", # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations. "entityType": "A String", # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`. @@ -259,7 +259,7 @@

Method Details

{ # A client resource represents a client buyer—an agency, a brand, or an advertiser customer of the sponsor buyer. Users associated with the client buyer have restricted access to the Marketplace and certain other sections of the Authorized Buyers UI based on the role granted to the client buyer. All fields are required unless otherwise specified. "clientAccountId": "A String", # The globally-unique numerical ID of the client. The value of this field is ignored in create and update operations. - "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. + "clientName": "A String", # Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. Maximum length of 255 characters is allowed. "entityId": "A String", # Numerical identifier of the client entity. The entity can be an advertiser, a brand, or an agency. This identifier is unique among all the entities with the same type. The value of this field is ignored if the entity type is not provided. A list of all known advertisers with their identifiers is available in the [advertisers.txt](https://storage.googleapis.com/adx-rtb-dictionaries/advertisers.txt) file. A list of all known brands with their identifiers is available in the [brands.txt](https://storage.googleapis.com/adx-rtb-dictionaries/brands.txt) file. A list of all known agencies with their identifiers is available in the [agencies.txt](https://storage.googleapis.com/adx-rtb-dictionaries/agencies.txt) file. "entityName": "A String", # The name of the entity. This field is automatically fetched based on the type and ID. The value of this field is ignored in create and update operations. "entityType": "A String", # An optional field for specifying the type of the client entity: `ADVERTISER`, `BRAND`, or `AGENCY`. diff --git a/docs/dyn/analyticsadmin_v1alpha.accounts.html b/docs/dyn/analyticsadmin_v1alpha.accounts.html index 049d914400f..eb32f40aa9f 100644 --- a/docs/dyn/analyticsadmin_v1alpha.accounts.html +++ b/docs/dyn/analyticsadmin_v1alpha.accounts.html @@ -439,6 +439,7 @@
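Reviewer note: the change above documents a 255-character cap on `clientName`. A minimal sketch of creating a client buyer with google-api-python-client, truncating defensively to that limit; the account ID, entity ID, and name below are hypothetical, and default credentials are assumed:

```python
from googleapiclient.discovery import build

# Ad Exchange Buyer II client (application default credentials assumed).
adexchangebuyer = build("adexchangebuyer2", "v2beta1")

client_body = {
    # Truncate defensively to the documented 255-character maximum.
    "clientName": "Example Agency Client"[:255],
    "entityType": "AGENCY",
    "entityId": "456",  # Hypothetical ID; real IDs come from agencies.txt.
    "role": "CLIENT_DEAL_VIEWER",
    "visibleToSeller": True,
}

# accountId is the sponsor buyer's account ID (hypothetical here).
created = adexchangebuyer.accounts().clients().create(
    accountId="12345678", body=client_body
).execute()
print(created["clientAccountId"])
```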

Method Details

"secretValue": "A String", # Output only. The measurement protocol secret value. Pass this value to the api_secret field of the Measurement Protocol API when sending hits to this secret's parent property. }, "property": { # A resource message representing a Google Analytics GA4 property. # A snapshot of a Property resource in change history. + "account": "A String", # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: "accounts/123" "createTime": "A String", # Output only. Time when the entity was originally created. "currencyCode": "A String", # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: "USD", "EUR", "JPY" "deleteTime": "A String", # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can. @@ -561,6 +562,7 @@

Method Details

"secretValue": "A String", # Output only. The measurement protocol secret value. Pass this value to the api_secret field of the Measurement Protocol API when sending hits to this secret's parent property. }, "property": { # A resource message representing a Google Analytics GA4 property. # A snapshot of a Property resource in change history. + "account": "A String", # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: "accounts/123" "createTime": "A String", # Output only. Time when the entity was originally created. "currencyCode": "A String", # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: "USD", "EUR", "JPY" "deleteTime": "A String", # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can. diff --git a/docs/dyn/analyticsadmin_v1alpha.properties.html b/docs/dyn/analyticsadmin_v1alpha.properties.html index 2415b39193a..be472f87507 100644 --- a/docs/dyn/analyticsadmin_v1alpha.properties.html +++ b/docs/dyn/analyticsadmin_v1alpha.properties.html @@ -177,6 +177,7 @@

Method Details

The object takes the form of: { # A resource message representing a Google Analytics GA4 property. + "account": "A String", # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: "accounts/123" "createTime": "A String", # Output only. Time when the entity was originally created. "currencyCode": "A String", # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: "USD", "EUR", "JPY" "deleteTime": "A String", # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can. @@ -199,6 +200,7 @@

Method Details

An object of the form: { # A resource message representing a Google Analytics GA4 property. + "account": "A String", # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: "accounts/123" "createTime": "A String", # Output only. Time when the entity was originally created. "currencyCode": "A String", # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: "USD", "EUR", "JPY" "deleteTime": "A String", # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can. @@ -228,6 +230,7 @@

Method Details

An object of the form: { # A resource message representing a Google Analytics GA4 property. + "account": "A String", # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: "accounts/123" "createTime": "A String", # Output only. Time when the entity was originally created. "currencyCode": "A String", # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: "USD", "EUR", "JPY" "deleteTime": "A String", # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can. @@ -257,6 +260,7 @@

Method Details

An object of the form: { # A resource message representing a Google Analytics GA4 property. + "account": "A String", # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: "accounts/123" "createTime": "A String", # Output only. Time when the entity was originally created. "currencyCode": "A String", # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: "USD", "EUR", "JPY" "deleteTime": "A String", # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can. @@ -334,6 +338,7 @@

Method Details

"nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. "properties": [ # Results that matched the filter criteria and were accessible to the caller. { # A resource message representing a Google Analytics GA4 property. + "account": "A String", # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: "accounts/123" "createTime": "A String", # Output only. Time when the entity was originally created. "currencyCode": "A String", # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: "USD", "EUR", "JPY" "deleteTime": "A String", # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can. @@ -374,6 +379,7 @@

Method Details

The object takes the form of: { # A resource message representing a Google Analytics GA4 property. + "account": "A String", # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: "accounts/123" "createTime": "A String", # Output only. Time when the entity was originally created. "currencyCode": "A String", # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: "USD", "EUR", "JPY" "deleteTime": "A String", # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can. @@ -397,6 +403,7 @@

Method Details

An object of the form: { # A resource message representing a Google Analytics GA4 property. + "account": "A String", # Immutable. The resource name of the parent account Format: accounts/{account_id} Example: "accounts/123" "createTime": "A String", # Output only. Time when the entity was originally created. "currencyCode": "A String", # The currency type used in reports involving monetary values. Format: https://en.wikipedia.org/wiki/ISO_4217 Examples: "USD", "EUR", "JPY" "deleteTime": "A String", # Output only. If set, the time at which this property was trashed. If not set, then this property is not currently in the trash can. diff --git a/docs/dyn/apigee_v1.organizations.apiproducts.rateplans.html b/docs/dyn/apigee_v1.organizations.apiproducts.rateplans.html index e6a737c8641..84ea33cfc83 100644 --- a/docs/dyn/apigee_v1.organizations.apiproducts.rateplans.html +++ b/docs/dyn/apigee_v1.organizations.apiproducts.rateplans.html @@ -135,7 +135,7 @@
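Reviewer note: the new `account` field surfaces the parent account directly on the Property resource. Since the field is immutable, it is most useful when reading a property back; a short sketch, with a hypothetical property ID:

```python
from googleapiclient.discovery import build

analytics = build("analyticsadmin", "v1alpha")

# Property ID is hypothetical.
prop = analytics.properties().get(name="properties/1234567").execute()
print(prop.get("account"))      # e.g. "accounts/123"
print(prop.get("displayName"))
```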

Method Details

}, "lastModifiedAt": "A String", # Output only. Time the rate plan was last modified in milliseconds since epoch. "name": "A String", # Output only. Name of the rate plan. - "paymentFundingModel": "A String", # Flag that specifies the billing account type, prepaid or postpaid. + "paymentFundingModel": "A String", # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid. "revenueShareRates": [ # Details of the revenue sharing model. { # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range. "end": "A String", # Ending value of the range. Set to 0 or `null` for the last range of values. @@ -189,7 +189,7 @@

Method Details

}, "lastModifiedAt": "A String", # Output only. Time the rate plan was last modified in milliseconds since epoch. "name": "A String", # Output only. Name of the rate plan. - "paymentFundingModel": "A String", # Flag that specifies the billing account type, prepaid or postpaid. + "paymentFundingModel": "A String", # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid. "revenueShareRates": [ # Details of the revenue sharing model. { # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range. "end": "A String", # Ending value of the range. Set to 0 or `null` for the last range of values. @@ -250,7 +250,7 @@

Method Details

}, "lastModifiedAt": "A String", # Output only. Time the rate plan was last modified in milliseconds since epoch. "name": "A String", # Output only. Name of the rate plan. - "paymentFundingModel": "A String", # Flag that specifies the billing account type, prepaid or postpaid. + "paymentFundingModel": "A String", # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid. "revenueShareRates": [ # Details of the revenue sharing model. { # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range. "end": "A String", # Ending value of the range. Set to 0 or `null` for the last range of values. @@ -311,7 +311,7 @@

Method Details

}, "lastModifiedAt": "A String", # Output only. Time the rate plan was last modified in milliseconds since epoch. "name": "A String", # Output only. Name of the rate plan. - "paymentFundingModel": "A String", # Flag that specifies the billing account type, prepaid or postpaid. + "paymentFundingModel": "A String", # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid. "revenueShareRates": [ # Details of the revenue sharing model. { # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range. "end": "A String", # Ending value of the range. Set to 0 or `null` for the last range of values. @@ -384,7 +384,7 @@

Method Details

}, "lastModifiedAt": "A String", # Output only. Time the rate plan was last modified in milliseconds since epoch. "name": "A String", # Output only. Name of the rate plan. - "paymentFundingModel": "A String", # Flag that specifies the billing account type, prepaid or postpaid. + "paymentFundingModel": "A String", # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid. "revenueShareRates": [ # Details of the revenue sharing model. { # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range. "end": "A String", # Ending value of the range. Set to 0 or `null` for the last range of values. @@ -442,7 +442,7 @@

Method Details

}, "lastModifiedAt": "A String", # Output only. Time the rate plan was last modified in milliseconds since epoch. "name": "A String", # Output only. Name of the rate plan. - "paymentFundingModel": "A String", # Flag that specifies the billing account type, prepaid or postpaid. + "paymentFundingModel": "A String", # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid. "revenueShareRates": [ # Details of the revenue sharing model. { # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range. "end": "A String", # Ending value of the range. Set to 0 or `null` for the last range of values. @@ -496,7 +496,7 @@

Method Details

}, "lastModifiedAt": "A String", # Output only. Time the rate plan was last modified in milliseconds since epoch. "name": "A String", # Output only. Name of the rate plan. - "paymentFundingModel": "A String", # Flag that specifies the billing account type, prepaid or postpaid. + "paymentFundingModel": "A String", # DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid. "revenueShareRates": [ # Details of the revenue sharing model. { # API call volume range and the percentage of revenue to share with the developer when the total number of API calls is within the range. "end": "A String", # Ending value of the range. Set to 0 or `null` for the last range of values. diff --git a/docs/dyn/apigee_v1.organizations.html b/docs/dyn/apigee_v1.organizations.html index c755cc1951f..4edbf739500 100644 --- a/docs/dyn/apigee_v1.organizations.html +++ b/docs/dyn/apigee_v1.organizations.html @@ -230,7 +230,7 @@

Method Details

"createdAt": "A String", # Output only. Time that the Apigee organization was created in milliseconds since epoch. "customerName": "A String", # Not used by Apigee. "description": "A String", # Description of the Apigee organization. - "displayName": "A String", + "displayName": "A String", # Display name for the Apigee organization. Unused, but reserved for future use. "environments": [ # Output only. List of environments in the Apigee organization. "A String", ], @@ -351,7 +351,7 @@

Method Details

"createdAt": "A String", # Output only. Time that the Apigee organization was created in milliseconds since epoch. "customerName": "A String", # Not used by Apigee. "description": "A String", # Description of the Apigee organization. - "displayName": "A String", + "displayName": "A String", # Display name for the Apigee organization. Unused, but reserved for future use. "environments": [ # Output only. List of environments in the Apigee organization. "A String", ], @@ -606,7 +606,7 @@

Method Details

"createdAt": "A String", # Output only. Time that the Apigee organization was created in milliseconds since epoch. "customerName": "A String", # Not used by Apigee. "description": "A String", # Description of the Apigee organization. - "displayName": "A String", + "displayName": "A String", # Display name for the Apigee organization. Unused, but reserved for future use. "environments": [ # Output only. List of environments in the Apigee organization. "A String", ], @@ -656,7 +656,7 @@

Method Details

"createdAt": "A String", # Output only. Time that the Apigee organization was created in milliseconds since epoch. "customerName": "A String", # Not used by Apigee. "description": "A String", # Description of the Apigee organization. - "displayName": "A String", + "displayName": "A String", # Display name for the Apigee organization. Unused, but reserved for future use. "environments": [ # Output only. List of environments in the Apigee organization. "A String", ], diff --git a/docs/dyn/appengine_v1.apps.services.html b/docs/dyn/appengine_v1.apps.services.html index 8a2126f8257..2f63ab0050d 100644 --- a/docs/dyn/appengine_v1.apps.services.html +++ b/docs/dyn/appengine_v1.apps.services.html @@ -156,6 +156,9 @@

Method Details

{ # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service. "id": "A String", # Relative name of the service within the application. Example: default.@OutputOnly + "labels": { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., "env=prod", "env=qa"). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels. + "a_key": "A String", + }, "name": "A String", # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly "networkSettings": { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions. "ingressTrafficAllowed": "A String", # The ingress settings for version or service. @@ -190,6 +193,9 @@

Method Details

"services": [ # The services belonging to the requested application. { # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service. "id": "A String", # Relative name of the service within the application. Example: default.@OutputOnly + "labels": { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., "env=prod", "env=qa"). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels. + "a_key": "A String", + }, "name": "A String", # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly "networkSettings": { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions. "ingressTrafficAllowed": "A String", # The ingress settings for version or service. @@ -231,6 +237,9 @@

Method Details

{ # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service. "id": "A String", # Relative name of the service within the application. Example: default.@OutputOnly + "labels": { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., "env=prod", "env=qa"). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels. + "a_key": "A String", + }, "name": "A String", # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly "networkSettings": { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions. "ingressTrafficAllowed": "A String", # The ingress settings for version or service. diff --git a/docs/dyn/appengine_v1beta.apps.services.html b/docs/dyn/appengine_v1beta.apps.services.html index f12977b9eb4..0dea8a6fb54 100644 --- a/docs/dyn/appengine_v1beta.apps.services.html +++ b/docs/dyn/appengine_v1beta.apps.services.html @@ -156,6 +156,9 @@

Method Details

{ # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service. "id": "A String", # Relative name of the service within the application. Example: default.@OutputOnly + "labels": { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., "env=prod", "env=qa"). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels. + "a_key": "A String", + }, "name": "A String", # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly "networkSettings": { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions. "ingressTrafficAllowed": "A String", # The ingress settings for version or service. @@ -190,6 +193,9 @@

Method Details

"services": [ # The services belonging to the requested application. { # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service. "id": "A String", # Relative name of the service within the application. Example: default.@OutputOnly + "labels": { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., "env=prod", "env=qa"). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels. + "a_key": "A String", + }, "name": "A String", # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly "networkSettings": { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions. "ingressTrafficAllowed": "A String", # The ingress settings for version or service. @@ -231,6 +237,9 @@

Method Details

{ # A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. Each service has a collection of versions that define a specific set of code used to implement the functionality of that service. "id": "A String", # Relative name of the service within the application. Example: default.@OutputOnly + "labels": { # A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., "env=prod", "env=qa"). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels. + "a_key": "A String", + }, "name": "A String", # Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly "networkSettings": { # A NetworkSettings resource is a container for ingress settings for a version or service. # Ingress settings for this service. Will apply to all versions. "ingressTrafficAllowed": "A String", # The ingress settings for version or service. diff --git a/docs/dyn/area120tables_v1alpha1.tables.html b/docs/dyn/area120tables_v1alpha1.tables.html index e37aa597081..19c1602031c 100644 --- a/docs/dyn/area120tables_v1alpha1.tables.html +++ b/docs/dyn/area120tables_v1alpha1.tables.html @@ -128,6 +128,7 @@
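Reviewer note: the new `labels` map (added in both the v1 and v1beta Service resources above) can plausibly be set through the standard services.patch call with an update mask. A sketch assuming `labels` is writable via `updateMask="labels"`; the application and service IDs are hypothetical:

```python
from googleapiclient.discovery import build

appengine = build("appengine", "v1")

op = appengine.apps().services().patch(
    appsId="my-app",       # Hypothetical application ID.
    servicesId="default",
    updateMask="labels",
    body={"labels": {"env": "prod", "team": "billing"}},
).execute()
print(op.get("name"))  # Long-running operation name.
```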

Method Details

}, "multipleValuesDisallowed": True or False, # Optional. Indicates whether or not multiple values are allowed for array types where such a restriction is possible. "name": "A String", # column name + "readonly": True or False, # Optional. Indicates that values for the column cannot be set by the user. "relationshipDetails": { # Details about a relationship column. # Optional. Additional details about a relationship column. Specified when data_type is relationship. "linkedTable": "A String", # The name of the table this relationship is linked to. }, @@ -182,6 +183,7 @@

Method Details

}, "multipleValuesDisallowed": True or False, # Optional. Indicates whether or not multiple values are allowed for array types where such a restriction is possible. "name": "A String", # column name + "readonly": True or False, # Optional. Indicates that values for the column cannot be set by the user. "relationshipDetails": { # Details about a relationship column. # Optional. Additional details about a relationship column. Specified when data_type is relationship. "linkedTable": "A String", # The name of the table this relationship is linked to. }, diff --git a/docs/dyn/area120tables_v1alpha1.workspaces.html b/docs/dyn/area120tables_v1alpha1.workspaces.html index 180a2727592..c9893174330 100644 --- a/docs/dyn/area120tables_v1alpha1.workspaces.html +++ b/docs/dyn/area120tables_v1alpha1.workspaces.html @@ -128,6 +128,7 @@

Method Details

}, "multipleValuesDisallowed": True or False, # Optional. Indicates whether or not multiple values are allowed for array types where such a restriction is possible. "name": "A String", # column name + "readonly": True or False, # Optional. Indicates that values for the column cannot be set by the user. "relationshipDetails": { # Details about a relationship column. # Optional. Additional details about a relationship column. Specified when data_type is relationship. "linkedTable": "A String", # The name of the table this relationship is linked to. }, @@ -189,6 +190,7 @@

Method Details

}, "multipleValuesDisallowed": True or False, # Optional. Indicates whether or not multiple values are allowed for array types where such a restriction is possible. "name": "A String", # column name + "readonly": True or False, # Optional. Indicates that values for the column cannot be set by the user. "relationshipDetails": { # Details about a relationship column. # Optional. Additional details about a relationship column. Specified when data_type is relationship. "linkedTable": "A String", # The name of the table this relationship is linked to. }, diff --git a/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html b/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html index 0fb6d01608c..2e84ee5293d 100644 --- a/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html +++ b/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html @@ -111,7 +111,7 @@

Method Details

The object takes the form of: { # An Workload object for managing highly regulated workloads of cloud customers. - "billingAccount": "A String", # Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. + "billingAccount": "A String", # Optional. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. "complianceRegime": "A String", # Required. Immutable. Compliance Regime associated with this workload. "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload @@ -204,7 +204,7 @@

Method Details

An object of the form: { # An Workload object for managing highly regulated workloads of cloud customers. - "billingAccount": "A String", # Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. + "billingAccount": "A String", # Optional. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. "complianceRegime": "A String", # Required. Immutable. Compliance Regime associated with this workload. "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload @@ -255,7 +255,7 @@

Method Details

"nextPageToken": "A String", # The next page token. Return empty if reached the last page. "workloads": [ # List of Workloads under a given parent. { # An Workload object for managing highly regulated workloads of cloud customers. - "billingAccount": "A String", # Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. + "billingAccount": "A String", # Optional. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. "complianceRegime": "A String", # Required. Immutable. Compliance Regime associated with this workload. "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload @@ -311,7 +311,7 @@

Method Details

The object takes the form of: { # An Workload object for managing highly regulated workloads of cloud customers. - "billingAccount": "A String", # Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. + "billingAccount": "A String", # Optional. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. "complianceRegime": "A String", # Required. Immutable. Compliance Regime associated with this workload. "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload @@ -350,7 +350,7 @@

Method Details

An object of the form: { # An Workload object for managing highly regulated workloads of cloud customers. - "billingAccount": "A String", # Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. + "billingAccount": "A String", # Optional. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. "complianceRegime": "A String", # Required. Immutable. Compliance Regime associated with this workload. "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload diff --git a/docs/dyn/bigquery_v2.jobs.html b/docs/dyn/bigquery_v2.jobs.html index 69ae2ad53fa..b9f6b6fcd80 100644 --- a/docs/dyn/bigquery_v2.jobs.html +++ b/docs/dyn/bigquery_v2.jobs.html @@ -82,7 +82,7 @@
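Reviewer note: with `billingAccount` relaxed from required to optional, a workload can be created without one and have a billing account assigned afterwards. A sketch with hypothetical organization and location values:

```python
from googleapiclient.discovery import build

assured = build("assuredworkloads", "v1")

workload = {
    "displayName": "My Workload",
    "complianceRegime": "FEDRAMP_MODERATE",
    # "billingAccount" is now optional and omitted here; it can be
    # assigned after the workload's resources are created.
}

op = assured.organizations().locations().workloads().create(
    parent="organizations/123/locations/us-central1",
    body=workload,
).execute()
print(op.get("name"))
```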

Instance Methods

Close httplib2 connections.

delete(projectId, jobId, location=None)

-  Requests that a job is deleted. This call will return when the job is deleted. This method is available in limited preview.

+  Requests the deletion of the metadata of a job. This call returns when the job's metadata is deleted.

get(projectId, jobId, location=None)

Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.

@@ -728,11 +728,11 @@

Method Details

delete(projectId, jobId, location=None)
-  Requests that a job is deleted. This call will return when the job is deleted. This method is available in limited preview.
+  Requests the deletion of the metadata of a job. This call returns when the job's metadata is deleted.
 
 Args:
-  projectId: string, Required. Project ID of the job to be deleted. (required)
-  jobId: string, Required. Job ID of the job to be deleted. If this is a parent job which has child jobs, all child jobs will be deleted as well. Deletion of child jobs directly is not allowed. (required)
+  projectId: string, Required. Project ID of the job for which metadata is to be deleted. (required)
+  jobId: string, Required. Job ID of the job for which metadata is to be deleted. If this is a parent job which has child jobs, the metadata from all child jobs will be deleted as well. Direct deletion of the metadata of child jobs is not allowed. (required)
   location: string, The geographic location of the job. Required. See details at: https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
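Reviewer note: a minimal sketch of the call as now documented; the project and job IDs are hypothetical. Only the job's metadata is deleted, and deleting a parent job also removes the metadata of its child jobs:

```python
from googleapiclient.discovery import build

bigquery = build("bigquery", "v2")

# Deletes job metadata only; child-job metadata is removed with the parent.
bigquery.jobs().delete(
    projectId="my-project",
    jobId="job_abc123",
    location="US",  # Required; see the locations doc referenced above.
).execute()
```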
 
diff --git a/docs/dyn/bigquery_v2.models.html b/docs/dyn/bigquery_v2.models.html index 3679ea8ebc0..81bd48f6aea 100644 --- a/docs/dyn/bigquery_v2.models.html +++ b/docs/dyn/bigquery_v2.models.html @@ -387,7 +387,12 @@

Method Details

"autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of non-seasonal p and q. "batchSize": "A String", # Batch size for dnn models. + "boosterType": "A String", # Booster type for boosted tree models. "cleanSpikesAndDips": True or False, # If true, clean spikes and dips in the input time series. + "colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. + "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. + "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. @@ -422,6 +427,7 @@

Method Details

"maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. + "minTreeChildWeight": "A String", # Minimum sum of instance weight needed in a child for boosted tree models. "modelUri": "A String", # Google Cloud Storage URI from which the model was imported. Only applicable for imported models. "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order. "d": "A String", # Order of the differencing part. @@ -430,6 +436,7 @@

Method Details

}, "numClusters": "A String", # Number of clusters for clustering models. "numFactors": "A String", # Num factors specified for matrix factorization models. + "numParallelTree": "A String", # Number of parallel trees constructed during each iteration for boosted tree models. "optimizationStrategy": "A String", # Optimization strategy for training linear regression models. "preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. @@ -439,6 +446,7 @@

Method Details

"A String", ], "timeSeriesTimestampColumn": "A String", # Column to be designated as time series timestamp for ARIMA model. + "treeMethod": "A String", # Tree construction algorithm for boosted tree models. "userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. @@ -729,7 +737,12 @@

"autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of non-seasonal p and q. "batchSize": "A String", # Batch size for dnn models. + "boosterType": "A String", # Booster type for boosted tree models. "cleanSpikesAndDips": True or False, # If true, clean spikes and dips in the input time series. + "colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. + "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. + "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. @@ -764,6 +777,7 @@

"maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. + "minTreeChildWeight": "A String", # Minimum sum of instance weight needed in a child for boosted tree models. "modelUri": "A String", # Google Cloud Storage URI from which the model was imported. Only applicable for imported models. "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order. "d": "A String", # Order of the differencing part. @@ -772,6 +786,7 @@

}, "numClusters": "A String", # Number of clusters for clustering models. "numFactors": "A String", # Num factors specified for matrix factorization models. + "numParallelTree": "A String", # Number of parallel trees constructed during each iteration for boosted tree models. "optimizationStrategy": "A String", # Optimization strategy for training linear regression models. "preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. @@ -781,6 +796,7 @@

"A String", ], "timeSeriesTimestampColumn": "A String", # Column to be designated as time series timestamp for ARIMA model. + "treeMethod": "A String", # Tree construction algorithm for boosted tree models. "userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. @@ -1084,7 +1100,12 @@

"autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of non-seasonal p and q. "batchSize": "A String", # Batch size for dnn models. + "boosterType": "A String", # Booster type for boosted tree models. "cleanSpikesAndDips": True or False, # If true, clean spikes and dips in the input time series. + "colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. + "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. + "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. @@ -1119,6 +1140,7 @@

"maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. + "minTreeChildWeight": "A String", # Minimum sum of instance weight needed in a child for boosted tree models. "modelUri": "A String", # Google Cloud Storage URI from which the model was imported. Only applicable for imported models. "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order. "d": "A String", # Order of the differencing part. @@ -1127,6 +1149,7 @@

}, "numClusters": "A String", # Number of clusters for clustering models. "numFactors": "A String", # Num factors specified for matrix factorization models. + "numParallelTree": "A String", # Number of parallel trees constructed during each iteration for boosted tree models. "optimizationStrategy": "A String", # Optimization strategy for training linear regression models. "preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. @@ -1136,6 +1159,7 @@

"A String", ], "timeSeriesTimestampColumn": "A String", # Column to be designated as time series timestamp for ARIMA model. + "treeMethod": "A String", # Tree construction algorithm for boosted tree models. "userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. @@ -1414,7 +1438,12 @@

"autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of non-seasonal p and q. "batchSize": "A String", # Batch size for dnn models. + "boosterType": "A String", # Booster type for boosted tree models. "cleanSpikesAndDips": True or False, # If true, clean spikes and dips in the input time series. + "colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. + "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. + "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. @@ -1449,6 +1478,7 @@

"maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. + "minTreeChildWeight": "A String", # Minimum sum of instance weight needed in a child for boosted tree models. "modelUri": "A String", # Google Cloud Storage URI from which the model was imported. Only applicable for imported models. "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order. "d": "A String", # Order of the differencing part. @@ -1457,6 +1487,7 @@

}, "numClusters": "A String", # Number of clusters for clustering models. "numFactors": "A String", # Num factors specified for matrix factorization models. + "numParallelTree": "A String", # Number of parallel trees constructed during each iteration for boosted tree models. "optimizationStrategy": "A String", # Optimization strategy for training linear regression models. "preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. @@ -1466,6 +1497,7 @@

"A String", ], "timeSeriesTimestampColumn": "A String", # Column to be designated as time series timestamp for ARIMA model. + "treeMethod": "A String", # Tree construction algorithm for boosted tree models. "userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. diff --git a/docs/dyn/clouderrorreporting_v1beta1.projects.events.html b/docs/dyn/clouderrorreporting_v1beta1.projects.events.html index 4742aff4dce..75a2e1cedd9 100644 --- a/docs/dyn/clouderrorreporting_v1beta1.projects.events.html +++ b/docs/dyn/clouderrorreporting_v1beta1.projects.events.html @@ -205,7 +205,7 @@

], "user": "A String", # The user who caused or was affected by the crash. This can be a user ID, an email address, or an arbitrary token that uniquely identifies the user. When sending an error report, leave this field empty if the user was not logged in. In this case the Error Reporting system will use other data, such as remote IP address, to distinguish affected users. See `affected_users_count` in `ErrorGroupStats`. }, - "eventTime": "A String", # Optional. Time when the event occurred. If not provided, the time when the event was received by the Error Reporting system will be used. + "eventTime": "A String", # Optional. Time when the event occurred. If not provided, the time when the event was received by the Error Reporting system is used. If provided, the time must not exceed the [logs retention period](https://cloud.google.com/logging/quotas#logs_retention_periods) in the past, or be more than 24 hours in the future. If an invalid time is provided, then an error is returned. "message": "A String", # Required. The error message. If no `context.reportLocation` is provided, the message must contain a header (typically consisting of the exception type name and an error message) and an exception stack trace in one of the supported programming languages and formats. Supported languages are Java, Python, JavaScript, Ruby, C#, PHP, and Go. Supported stack trace formats are: * **Java**: Must be the return value of [`Throwable.printStackTrace()`](https://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html#printStackTrace%28%29). * **Python**: Must be the return value of [`traceback.format_exc()`](https://docs.python.org/2/library/traceback.html#traceback.format_exc). * **JavaScript**: Must be the value of [`error.stack`](https://github.com/v8/v8/wiki/Stack-Trace-API) as returned by V8. * **Ruby**: Must contain frames returned by [`Exception.backtrace`](https://ruby-doc.org/core-2.2.0/Exception.html#method-i-backtrace). * **C#**: Must be the return value of [`Exception.ToString()`](https://msdn.microsoft.com/en-us/library/system.exception.tostring.aspx). * **PHP**: Must start with `PHP (Notice|Parse error|Fatal error|Warning)` and contain the result of [`(string)$exception`](http://php.net/manual/en/exception.tostring.php). * **Go**: Must be the return value of [`runtime.Stack()`](https://golang.org/pkg/runtime/debug/#Stack). "serviceContext": { # Describes a running service that sends errors. Its version changes over time and multiple versions can run in parallel. # Required. The service context in which this error has occurred. "resourceType": "A String", # Type of the MonitoredResource. List of possible values: https://cloud.google.com/monitoring/api/resources Value is set automatically for incoming errors and must not be set when reporting errors. diff --git a/docs/dyn/cloudidentity_v1.groups.html b/docs/dyn/cloudidentity_v1.groups.html index 8ea66c58157..27402fd7643 100644 --- a/docs/dyn/cloudidentity_v1.groups.html +++ b/docs/dyn/cloudidentity_v1.groups.html @@ -130,7 +130,7 @@

"dynamicGroupMetadata": { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status. "queries": [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups. { # Defines a query on a resource. - "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` + "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')` "resourceType": "A String", # Resource type for the Dynamic Group Query }, ], @@ -241,7 +241,7 @@

"dynamicGroupMetadata": { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status. "queries": [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups. { # Defines a query on a resource. - "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` + "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')` "resourceType": "A String", # Resource type for the Dynamic Group Query }, ], @@ -293,7 +293,7 @@

"dynamicGroupMetadata": { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status. "queries": [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups. { # Defines a query on a resource. - "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` + "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')` "resourceType": "A String", # Resource type for the Dynamic Group Query }, ], @@ -368,7 +368,7 @@

"dynamicGroupMetadata": { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status. "queries": [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups. { # Defines a query on a resource. - "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` + "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')` "resourceType": "A String", # Resource type for the Dynamic Group Query }, ], @@ -449,7 +449,7 @@

"dynamicGroupMetadata": { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status. "queries": [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups. { # Defines a query on a resource. - "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` + "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')` "resourceType": "A String", # Resource type for the Dynamic Group Query }, ], diff --git a/docs/dyn/cloudidentity_v1beta1.groups.html b/docs/dyn/cloudidentity_v1beta1.groups.html index ed8234594b6..6cd4328ce9c 100644 --- a/docs/dyn/cloudidentity_v1beta1.groups.html +++ b/docs/dyn/cloudidentity_v1beta1.groups.html @@ -136,7 +136,7 @@

"dynamicGroupMetadata": { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status. "queries": [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups. { # Defines a query on a resource. - "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` + "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')` "resourceType": "A String", }, ], @@ -260,7 +260,7 @@

"dynamicGroupMetadata": { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status. "queries": [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups. { # Defines a query on a resource. - "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` + "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')` "resourceType": "A String", }, ], @@ -325,7 +325,7 @@

"dynamicGroupMetadata": { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status. "queries": [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups. { # Defines a query on a resource. - "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` + "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')` "resourceType": "A String", }, ], @@ -413,7 +413,7 @@

"dynamicGroupMetadata": { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status. "queries": [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups. { # Defines a query on a resource. - "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` + "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')` "resourceType": "A String", }, ], @@ -506,7 +506,7 @@

"dynamicGroupMetadata": { # Dynamic group metadata like queries and status. # Optional. Dynamic group metadata like queries and status. "queries": [ # Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups. { # Defines a query on a resource. - "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` + "query": "A String", # Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')` "resourceType": "A String", }, ], diff --git a/docs/dyn/composer_v1.projects.locations.environments.html b/docs/dyn/composer_v1.projects.locations.environments.html index 8486400353e..90c7288f749 100644 --- a/docs/dyn/composer_v1.projects.locations.environments.html +++ b/docs/dyn/composer_v1.projects.locations.environments.html @@ -114,45 +114,45 @@

"config": { # Configuration information for an environment. # Configuration parameters for this environment. "airflowUri": "A String", # Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)). "dagGcsPrefix": "A String", # Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using "/"-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix. - "databaseConfig": { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. + "databaseConfig": { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "machineType": "A String", # Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used. }, - "encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. + "encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies.Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "kmsKeyName": "A String", # Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. }, "gkeCluster": "A String", # Output only. The Kubernetes Engine cluster used to run this environment. "nodeConfig": { # The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. # The configuration used for the Kubernetes Engine cluster. - "diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. + "diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "ipAllocationPolicy": { # Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. # Optional. The configuration for controlling how IPs are allocated in the GKE cluster. - "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. 
Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. - "clusterSecondaryRangeName": "A String", # Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. This field is applicable only when `use_ip_aliases` is true. - "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. - "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. This field is applicable only when `use_ip_aliases` is true. - "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. + "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + "clusterSecondaryRangeName": "A String", # Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. }, - "location": "A String", # Optional. 
The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. - "machineType": "A String", # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". + "location": "A String", # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "machineType": "A String", # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. 
If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "network": "A String", # Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/global/networks/{networkId}". If unspecified, the "default" network ID in the environment's project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`. - "oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. + "oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "A String", ], "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. "subnetwork": "A String", # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. - "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. + "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "A String", ], }, - "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. + "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "privateEnvironmentConfig": { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. 
- "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "privateClusterConfig": { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment. "enablePrivateEndpoint": True or False, # Optional. If `true`, access to the public endpoint of the GKE cluster is denied. "masterIpv4CidrBlock": "A String", # Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of '172.16.0.0/23' is used. "masterIpv4ReservedRange": "A String", # Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. }, - "webServerIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. - "webServerIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's App Engine VMs. + "webServerIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "webServerIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. }, "softwareConfig": { # Specifies the selection and configuration of software inside the environment. # The configuration settings for software inside the environment. "airflowConfigOverrides": { # Optional. Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and must not contain an equals sign ("=") or semicolon (";"). Section and property names must not contain a period ("."). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden. @@ -165,13 +165,13 @@

"pypiPackages": { # Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as "numpy" and values are the lowercase extras and version specifier such as "==1.12.0", "[devel,gcp_api]", or "[devel]>=1.8.2, <1.9.2". To specify a package without pinning it to a version specifier, use the empty string as the value. "a_key": "A String", }, - "pythonVersion": "A String", # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. + "pythonVersion": "A String", # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3. "schedulerCount": 42, # Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. }, - "webServerConfig": { # The configuration settings for the Airflow web server App Engine instance. # Optional. The configuration settings for the Airflow web server App Engine instance. + "webServerConfig": { # The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.* # Optional. The configuration settings for the Airflow web server App Engine instance. "machineType": "A String", # Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. Value custom is returned only in response, if Airflow web server parameters were manually changed to a non-standard values. }, - "webServerNetworkAccessControl": { # Network-level access control policy for the Airflow web server. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. + "webServerNetworkAccessControl": { # Network-level access control policy for the Airflow web server. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "allowedIpRanges": [ # A collection of allowed IP ranges with descriptions. { # Allowed IP range with user-provided description. "description": "A String", # Optional. User-provided description. It must contain at most 300 characters. @@ -272,45 +272,45 @@

"config": { # Configuration information for an environment. # Configuration parameters for this environment. "airflowUri": "A String", # Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)). "dagGcsPrefix": "A String", # Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using "/"-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix. - "databaseConfig": { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. + "databaseConfig": { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "machineType": "A String", # Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used. }, - "encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. + "encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies.Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "kmsKeyName": "A String", # Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. }, "gkeCluster": "A String", # Output only. The Kubernetes Engine cluster used to run this environment. "nodeConfig": { # The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. # The configuration used for the Kubernetes Engine cluster. - "diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. + "diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "ipAllocationPolicy": { # Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. # Optional. The configuration for controlling how IPs are allocated in the GKE cluster. - "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. 
Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. - "clusterSecondaryRangeName": "A String", # Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. This field is applicable only when `use_ip_aliases` is true. - "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. - "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. This field is applicable only when `use_ip_aliases` is true. - "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. + "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + "clusterSecondaryRangeName": "A String", # Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. }, - "location": "A String", # Optional. 
The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. - "machineType": "A String", # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". + "location": "A String", # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "machineType": "A String", # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. 
If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "network": "A String", # Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/global/networks/{networkId}". If unspecified, the "default" network ID in the environment's project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`. - "oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. + "oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "A String", ], "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. "subnetwork": "A String", # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. - "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. + "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "A String", ], }, - "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. + "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "privateEnvironmentConfig": { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. 
- "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "privateClusterConfig": { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment. "enablePrivateEndpoint": True or False, # Optional. If `true`, access to the public endpoint of the GKE cluster is denied. "masterIpv4CidrBlock": "A String", # Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of '172.16.0.0/23' is used. "masterIpv4ReservedRange": "A String", # Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. }, - "webServerIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. - "webServerIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's App Engine VMs. + "webServerIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "webServerIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. }, "softwareConfig": { # Specifies the selection and configuration of software inside the environment. # The configuration settings for software inside the environment. "airflowConfigOverrides": { # Optional. Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and must not contain an equals sign ("=") or semicolon (";"). Section and property names must not contain a period ("."). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden. @@ -323,13 +323,13 @@

Method Details

"pypiPackages": { # Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as "numpy" and values are the lowercase extras and version specifier such as "==1.12.0", "[devel,gcp_api]", or "[devel]>=1.8.2, <1.9.2". To specify a package without pinning it to a version specifier, use the empty string as the value. "a_key": "A String", }, - "pythonVersion": "A String", # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. + "pythonVersion": "A String", # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3. "schedulerCount": 42, # Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. }, - "webServerConfig": { # The configuration settings for the Airflow web server App Engine instance. # Optional. The configuration settings for the Airflow web server App Engine instance. + "webServerConfig": { # The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.* # Optional. The configuration settings for the Airflow web server App Engine instance. "machineType": "A String", # Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. Value custom is returned only in response, if Airflow web server parameters were manually changed to a non-standard values. }, - "webServerNetworkAccessControl": { # Network-level access control policy for the Airflow web server. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. + "webServerNetworkAccessControl": { # Network-level access control policy for the Airflow web server. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "allowedIpRanges": [ # A collection of allowed IP ranges with descriptions. { # Allowed IP range with user-provided description. "description": "A String", # Optional. User-provided description. It must contain at most 300 characters. @@ -371,45 +371,45 @@

Method Details

"config": { # Configuration information for an environment. # Configuration parameters for this environment. "airflowUri": "A String", # Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)). "dagGcsPrefix": "A String", # Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using "/"-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix. - "databaseConfig": { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. + "databaseConfig": { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "machineType": "A String", # Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used. }, - "encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. + "encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies.Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "kmsKeyName": "A String", # Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. }, "gkeCluster": "A String", # Output only. The Kubernetes Engine cluster used to run this environment. "nodeConfig": { # The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. # The configuration used for the Kubernetes Engine cluster. - "diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. + "diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "ipAllocationPolicy": { # Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. # Optional. The configuration for controlling how IPs are allocated in the GKE cluster. - "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. 
Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. - "clusterSecondaryRangeName": "A String", # Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. This field is applicable only when `use_ip_aliases` is true. - "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. - "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. This field is applicable only when `use_ip_aliases` is true. - "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. + "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + "clusterSecondaryRangeName": "A String", # Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. }, - "location": "A String", # Optional. 
The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. - "machineType": "A String", # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". + "location": "A String", # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "machineType": "A String", # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. 
If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "network": "A String", # Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/global/networks/{networkId}". If unspecified, the "default" network ID in the environment's project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`. - "oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. + "oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "A String", ], "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. "subnetwork": "A String", # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. - "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. + "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "A String", ], }, - "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. + "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "privateEnvironmentConfig": { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. 
- "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "privateClusterConfig": { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment. "enablePrivateEndpoint": True or False, # Optional. If `true`, access to the public endpoint of the GKE cluster is denied. "masterIpv4CidrBlock": "A String", # Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of '172.16.0.0/23' is used. "masterIpv4ReservedRange": "A String", # Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. }, - "webServerIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. - "webServerIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's App Engine VMs. + "webServerIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "webServerIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. }, "softwareConfig": { # Specifies the selection and configuration of software inside the environment. # The configuration settings for software inside the environment. "airflowConfigOverrides": { # Optional. Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and must not contain an equals sign ("=") or semicolon (";"). Section and property names must not contain a period ("."). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden. @@ -422,13 +422,13 @@

Method Details

"pypiPackages": { # Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as "numpy" and values are the lowercase extras and version specifier such as "==1.12.0", "[devel,gcp_api]", or "[devel]>=1.8.2, <1.9.2". To specify a package without pinning it to a version specifier, use the empty string as the value. "a_key": "A String", }, - "pythonVersion": "A String", # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. + "pythonVersion": "A String", # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3. "schedulerCount": 42, # Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. }, - "webServerConfig": { # The configuration settings for the Airflow web server App Engine instance. # Optional. The configuration settings for the Airflow web server App Engine instance. + "webServerConfig": { # The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.* # Optional. The configuration settings for the Airflow web server App Engine instance. "machineType": "A String", # Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. Value custom is returned only in response, if Airflow web server parameters were manually changed to a non-standard values. }, - "webServerNetworkAccessControl": { # Network-level access control policy for the Airflow web server. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. + "webServerNetworkAccessControl": { # Network-level access control policy for the Airflow web server. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "allowedIpRanges": [ # A collection of allowed IP ranges with descriptions. { # Allowed IP range with user-provided description. "description": "A String", # Optional. User-provided description. It must contain at most 300 characters. @@ -478,45 +478,45 @@

Method Details

"config": { # Configuration information for an environment. # Configuration parameters for this environment. "airflowUri": "A String", # Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)). "dagGcsPrefix": "A String", # Output only. The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using "/"-delimited object name prefixes. DAG objects for this environment reside in a simulated directory with the given prefix. - "databaseConfig": { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. + "databaseConfig": { # The configuration of Cloud SQL instance that is used by the Apache Airflow software. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "machineType": "A String", # Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used. }, - "encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. + "encryptionConfig": { # The encryption options for the Cloud Composer environment and its dependencies.Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "kmsKeyName": "A String", # Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated. If not specified, Google-managed key will be used. }, "gkeCluster": "A String", # Output only. The Kubernetes Engine cluster used to run this environment. "nodeConfig": { # The configuration information for the Kubernetes Engine nodes running the Apache Airflow software. # The configuration used for the Kubernetes Engine cluster. - "diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. + "diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "ipAllocationPolicy": { # Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software. # Optional. The configuration for controlling how IPs are allocated in the GKE cluster. - "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. 
Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. - "clusterSecondaryRangeName": "A String", # Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. This field is applicable only when `use_ip_aliases` is true. - "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. - "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. This field is applicable only when `use_ip_aliases` is true. - "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. + "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + "clusterSecondaryRangeName": "A String", # Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. + "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. }, - "location": "A String", # Optional. 
The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. - "machineType": "A String", # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". + "location": "A String", # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "machineType": "A String", # Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. 
If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to "n1-standard-1". This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "network": "A String", # Optional. The Compute Engine network to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/global/networks/{networkId}". If unspecified, the "default" network ID in the environment's project is used. If a [Custom Subnet Network](/vpc/docs/vpc#vpc_networks_and_subnets) is provided, `nodeConfig.subnetwork` must also be provided. For [Shared VPC](/vpc/docs/shared-vpc) subnetwork requirements, see `nodeConfig.subnetwork`. - "oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. + "oauthScopes": [ # Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "A String", ], "serviceAccount": "A String", # Optional. The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. "subnetwork": "A String", # Optional. The Compute Engine subnetwork to be used for machine communications, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/regions/{regionId}/subnetworks/{subnetworkId}" If a subnetwork is provided, `nodeConfig.network` must also be provided, and the subnetwork must belong to the enclosing environment's project and location. - "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. + "tags": [ # Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "A String", ], }, - "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. + "nodeCount": 42, # The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "privateEnvironmentConfig": { # The configuration information for configuring a Private IP Cloud Composer environment. # The configuration used for the Private IP Cloud Composer environment. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. 
- "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "privateClusterConfig": { # Configuration options for the private GKE cluster in a Cloud Composer environment. # Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment. "enablePrivateEndpoint": True or False, # Optional. If `true`, access to the public endpoint of the GKE cluster is denied. "masterIpv4CidrBlock": "A String", # Optional. The CIDR block from which IPv4 range for GKE master will be reserved. If left blank, the default value of '172.16.0.0/23' is used. "masterIpv4ReservedRange": "A String", # Output only. The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the GKE cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. }, - "webServerIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. - "webServerIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's App Engine VMs. + "webServerIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "webServerIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. }, "softwareConfig": { # Specifies the selection and configuration of software inside the environment. # The configuration settings for software inside the environment. "airflowConfigOverrides": { # Optional. Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and must not contain an equals sign ("=") or semicolon (";"). Section and property names must not contain a period ("."). Apache Airflow configuration property names must be written in [snake_case](https://en.wikipedia.org/wiki/Snake_case). Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are [blocked](/composer/docs/concepts/airflow-configurations), and cannot be overridden. @@ -529,13 +529,13 @@

Method Details

"pypiPackages": { # Optional. Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name such as "numpy" and values are the lowercase extras and version specifier such as "==1.12.0", "[devel,gcp_api]", or "[devel]>=1.8.2, <1.9.2". To specify a package without pinning it to a version specifier, use the empty string as the value. "a_key": "A String", }, - "pythonVersion": "A String", # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. + "pythonVersion": "A String", # Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3. "schedulerCount": 42, # Optional. The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. }, - "webServerConfig": { # The configuration settings for the Airflow web server App Engine instance. # Optional. The configuration settings for the Airflow web server App Engine instance. + "webServerConfig": { # The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.* # Optional. The configuration settings for the Airflow web server App Engine instance. "machineType": "A String", # Optional. Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. Value custom is returned only in response, if Airflow web server parameters were manually changed to a non-standard values. }, - "webServerNetworkAccessControl": { # Network-level access control policy for the Airflow web server. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. + "webServerNetworkAccessControl": { # Network-level access control policy for the Airflow web server. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. # Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "allowedIpRanges": [ # A collection of allowed IP ranges with descriptions. { # Allowed IP range with user-provided description. "description": "A String", # Optional. User-provided description. It must contain at most 300 characters. @@ -554,7 +554,7 @@

Method Details

"uuid": "A String", # Output only. The UUID (Universally Unique IDentifier) associated with this environment. This value is generated when the environment is created. } - updateMask: string, Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of numpy, the `updateMask` parameter would include the following two `paths` values: "config.softwareConfig.pypiPackages.scikit-learn" and "config.softwareConfig.pypiPackages.numpy". The included patch environment would specify the scikit-learn version as follows: { "config":{ "softwareConfig":{ "pypiPackages":{ "scikit-learn":"==0.19.0" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and numpy will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels "label1" and "label2" while clearing "label3" (assuming it already exists), one can provide the paths "labels.label1", "labels.label2", and "labels.label3" and populate the patch environment as follows: { "labels":{ "label1":"new-label1-value" "label2":"new-label2-value" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path "config.softwareConfig.pypiPackages", and the patch environment would be the following: { "config":{ "softwareConfig":{ "pypiPackages":{ "botocore":"==1.7.14" } } } } **Note:** Only the following fields can be updated: * `config.softwareConfig.pypiPackages` * Replace all custom custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * `config.softwareConfig.pypiPackages.`packagename * Update the custom PyPI package *packagename*, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the `config.softwareConfig.pypiPackages` mask. * `labels` * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * `labels.`labelName * Set the label named *labelName*, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the `labels` mask. * `config.nodeCount` * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. * `config.webServerNetworkAccessControl` * Replace the environment's current `WebServerNetworkAccessControl`. 
* `config.databaseConfig` * Replace the environment's current `DatabaseConfig`. * `config.webServerConfig` * Replace the environment's current `WebServerConfig`. * `config.softwareConfig.airflowConfigOverrides` * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * `config.softwareConfig.airflowConfigOverrides.`section-name * Override the Apache Airflow config property *name* in the section named *section*, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the `config.softwareConfig.airflowConfigOverrides` mask. * `config.softwareConfig.envVariables` * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * `config.softwareConfig.schedulerCount` * Horizontally scale the number of schedulers in Airflow. A positive integer not greater than the number of nodes must be provided in the `config.softwareConfig.schedulerCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. + updateMask: string, Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of numpy, the `updateMask` parameter would include the following two `paths` values: "config.softwareConfig.pypiPackages.scikit-learn" and "config.softwareConfig.pypiPackages.numpy". The included patch environment would specify the scikit-learn version as follows: { "config":{ "softwareConfig":{ "pypiPackages":{ "scikit-learn":"==0.19.0" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and numpy will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels "label1" and "label2" while clearing "label3" (assuming it already exists), one can provide the paths "labels.label1", "labels.label2", and "labels.label3" and populate the patch environment as follows: { "labels":{ "label1":"new-label1-value" "label2":"new-label2-value" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path "config.softwareConfig.pypiPackages", and the patch environment would be the following: { "config":{ "softwareConfig":{ "pypiPackages":{ "botocore":"==1.7.14" } } } } **Note:** Only the following fields can be updated: * `config.softwareConfig.pypiPackages` * Replace all custom custom PyPI packages. 
If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * `config.softwareConfig.pypiPackages.`packagename * Update the custom PyPI package *packagename*, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the `config.softwareConfig.pypiPackages` mask. * `labels` * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * `labels.`labelName * Set the label named *labelName*, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the `labels` mask. * `config.nodeCount` * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.webServerNetworkAccessControl` * Replace the environment's current `WebServerNetworkAccessControl`. * `config.databaseConfig` Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * Replace the environment's current `DatabaseConfig`. * `config.webServerConfig.machineType` * Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.softwareConfig.airflowConfigOverrides` * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * `config.softwareConfig.airflowConfigOverrides.`section-name * Override the Apache Airflow config property *name* in the section named *section*, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the `config.softwareConfig.airflowConfigOverrides` mask. * `config.softwareConfig.envVariables` * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * `config.softwareConfig.schedulerCount` * Horizontally scale the number of schedulers in Airflow. A positive integer not greater than the number of nodes must be provided in the `config.softwareConfig.schedulerCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. * `config.databaseConfig.machineType` * Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.webServerConfig.machineType` * Machine type on which Airflow web server is running. 
It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/composer_v1beta1.projects.locations.environments.html b/docs/dyn/composer_v1beta1.projects.locations.environments.html index 6923ca6a8d5..342eba98466 100644 --- a/docs/dyn/composer_v1beta1.projects.locations.environments.html +++ b/docs/dyn/composer_v1beta1.projects.locations.environments.html @@ -179,10 +179,10 @@
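To make the mask semantics above concrete, a minimal sketch (illustrative, not part of the generated docs) of the description's first example issued through the Python client; the project, location, and environment names are placeholders:

from googleapiclient.discovery import build

composer = build("composer", "v1beta1")  # credentials resolved from the environment
name = "projects/my-project/locations/us-central1/environments/my-env"  # placeholder

# Pin scikit-learn to 0.19.0 and remove numpy in one call: numpy is deleted
# because it is named in updateMask but omitted from the patch body; all
# other PyPI packages are left untouched.
operation = composer.projects().locations().environments().patch(
    name=name,
    updateMask=(
        "config.softwareConfig.pypiPackages.scikit-learn,"
        "config.softwareConfig.pypiPackages.numpy"
    ),
    body={"config": {"softwareConfig": {"pypiPackages": {"scikit-learn": "==0.19.0"}}}},
).execute()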

Method Details

"diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "enableIpMasqAgent": True or False, # Optional. Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent "ipAllocationPolicy": { # Configuration for controlling how IPs are allocated in the GKE cluster. # Optional. The IPAllocationPolicy fields for the GKE cluster. - "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. - "clusterSecondaryRangeName": "A String", # Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. - "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. - "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. 
Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. + "clusterSecondaryRangeName": "A String", # Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. + "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. }, "location": "A String", # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. @@ -368,10 +368,10 @@
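For reference, an illustrative ipAllocationPolicy value consistent with the descriptions above (ranges are example RFC-1918 values, not recommendations); for each of pods and services you give either a CIDR block/netmask or a secondary-range name, never both:

# Hypothetical IPAllocationPolicy for a composer-1.x (use_ip_aliases) environment.
ip_allocation_policy = {
    "useIpAliases": True,
    "clusterIpv4CidrBlock": "10.96.0.0/14",  # pick the pod range explicitly
    "servicesIpv4CidrBlock": "/20",          # let GKE choose a /20 for services
}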

Method Details

"diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "enableIpMasqAgent": True or False, # Optional. Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent "ipAllocationPolicy": { # Configuration for controlling how IPs are allocated in the GKE cluster. # Optional. The IPAllocationPolicy fields for the GKE cluster. - "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. - "clusterSecondaryRangeName": "A String", # Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. - "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. - "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. 
Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. + "clusterSecondaryRangeName": "A String", # Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. + "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. }, "location": "A String", # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. @@ -498,10 +498,10 @@

Method Details

"diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "enableIpMasqAgent": True or False, # Optional. Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent "ipAllocationPolicy": { # Configuration for controlling how IPs are allocated in the GKE cluster. # Optional. The IPAllocationPolicy fields for the GKE cluster. - "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. - "clusterSecondaryRangeName": "A String", # Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. - "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. - "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. 
Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. + "clusterSecondaryRangeName": "A String", # Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. + "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. }, "location": "A String", # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. @@ -636,10 +636,10 @@

Method Details

"diskSizeGb": 42, # Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. "enableIpMasqAgent": True or False, # Optional. Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent "ipAllocationPolicy": { # Configuration for controlling how IPs are allocated in the GKE cluster. # Optional. The IPAllocationPolicy fields for the GKE cluster. - "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. - "clusterSecondaryRangeName": "A String", # Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. - "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. - "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "clusterIpv4CidrBlock": "A String", # Optional. The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. 
Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. + "clusterSecondaryRangeName": "A String", # Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. + "servicesIpv4CidrBlock": "A String", # Optional. The IP address range of the services IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. + "servicesSecondaryRangeName": "A String", # Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. "useIpAliases": True or False, # Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters. }, "location": "A String", # Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: "projects/{projectId}/zones/{zoneId}". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. diff --git a/docs/dyn/container_v1.projects.locations.clusters.html b/docs/dyn/container_v1.projects.locations.clusters.html index 0bfa22a962c..91dab9006d3 100644 --- a/docs/dyn/container_v1.projects.locations.clusters.html +++ b/docs/dyn/container_v1.projects.locations.clusters.html @@ -520,8 +520,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -1121,8 +1121,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -1625,8 +1625,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -2784,8 +2784,8 @@

Method Details

"desiredNodePoolAutoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "desiredNodePoolId": "A String", # The node pool to be upgraded. This field is mandatory if "desired_node_version", "desired_image_family" or "desired_node_pool_autoscaling" is specified and there is more than one node pool on the cluster. "desiredNodeVersion": "A String", # The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the Kubernetes master version diff --git a/docs/dyn/container_v1.projects.locations.clusters.nodePools.html b/docs/dyn/container_v1.projects.locations.clusters.nodePools.html index a0cdca62544..88d52a5458f 100644 --- a/docs/dyn/container_v1.projects.locations.clusters.nodePools.html +++ b/docs/dyn/container_v1.projects.locations.clusters.nodePools.html @@ -125,8 +125,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -396,8 +396,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -533,8 +533,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -740,8 +740,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Required. Autoscaling configuration for the node pool. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "clusterId": "A String", # Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field. "name": "A String", # The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`. diff --git a/docs/dyn/container_v1.projects.zones.clusters.html b/docs/dyn/container_v1.projects.zones.clusters.html index b25cf271c88..00bbe854122 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.html +++ b/docs/dyn/container_v1.projects.zones.clusters.html @@ -624,8 +624,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -1225,8 +1225,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -1773,8 +1773,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -2845,8 +2845,8 @@

Method Details

"desiredNodePoolAutoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "desiredNodePoolId": "A String", # The node pool to be upgraded. This field is mandatory if "desired_node_version", "desired_image_family" or "desired_node_pool_autoscaling" is specified and there is more than one node pool on the cluster. "desiredNodeVersion": "A String", # The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the Kubernetes master version diff --git a/docs/dyn/container_v1.projects.zones.clusters.nodePools.html b/docs/dyn/container_v1.projects.zones.clusters.nodePools.html index 495e4650177..695a62d230e 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.nodePools.html +++ b/docs/dyn/container_v1.projects.zones.clusters.nodePools.html @@ -121,8 +121,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Required. Autoscaling configuration for the node pool. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "clusterId": "A String", # Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field. "name": "A String", # The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`. @@ -214,8 +214,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -485,8 +485,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -622,8 +622,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.html b/docs/dyn/container_v1beta1.projects.locations.clusters.html index 600aa9e9f2d..af83aefd0ee 100644 --- a/docs/dyn/container_v1beta1.projects.locations.clusters.html +++ b/docs/dyn/container_v1beta1.projects.locations.clusters.html @@ -554,8 +554,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -1208,8 +1208,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -1765,8 +1765,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -2970,8 +2970,8 @@

Method Details

"desiredNodePoolAutoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "desiredNodePoolId": "A String", # The node pool to be upgraded. This field is mandatory if "desired_node_version", "desired_image_family", "desired_node_pool_autoscaling", or "desired_workload_metadata_config" is specified and there is more than one node pool on the cluster. "desiredNodeVersion": "A String", # The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the Kubernetes master version diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html b/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html index 8de0756421c..cebc1557200 100644 --- a/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html +++ b/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html @@ -125,8 +125,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -401,8 +401,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -543,8 +543,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -755,8 +755,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Required. Autoscaling configuration for the node pool. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "clusterId": "A String", # Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field. "name": "A String", # The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`. diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.html b/docs/dyn/container_v1beta1.projects.zones.clusters.html index ba33ce76de0..ca561f033fb 100644 --- a/docs/dyn/container_v1beta1.projects.zones.clusters.html +++ b/docs/dyn/container_v1beta1.projects.zones.clusters.html @@ -665,8 +665,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -1319,8 +1319,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -1920,8 +1920,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -3031,8 +3031,8 @@

Method Details

"desiredNodePoolAutoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for the node pool specified in desired_node_pool_id. If there is only one pool in the cluster and desired_node_pool_id is not provided then the change applies to that single node pool. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "desiredNodePoolId": "A String", # The node pool to be upgraded. This field is mandatory if "desired_node_version", "desired_image_family", "desired_node_pool_autoscaling", or "desired_workload_metadata_config" is specified and there is more than one node pool on the cluster. "desiredNodeVersion": "A String", # The Kubernetes version to change the nodes to (typically an upgrade). Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-": picks the Kubernetes master version diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html b/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html index 83a022e083d..7335d2a60c0 100644 --- a/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html +++ b/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html @@ -121,8 +121,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Required. Autoscaling configuration for the node pool. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "clusterId": "A String", # Required. Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field. "name": "A String", # The name (project, location, cluster, node pool) of the node pool to set autoscaler settings. Specified in the format `projects/*/locations/*/clusters/*/nodePools/*`. @@ -214,8 +214,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -490,8 +490,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). @@ -632,8 +632,8 @@

Method Details

"autoscaling": { # NodePoolAutoscaling contains information required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. # Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present. "autoprovisioned": True or False, # Can this node pool be deleted automatically. "enabled": True or False, # Is autoscaling enabled for this node pool. - "maxNodeCount": 42, # Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. - "minNodeCount": 42, # Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + "maxNodeCount": 42, # Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster. + "minNodeCount": 42, # Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count. }, "conditions": [ # Which conditions caused the current node pool state. { # StatusCondition describes why a cluster or a node pool has a certain status (e.g., ERROR or DEGRADED). diff --git a/docs/dyn/content_v2_1.accounts.html b/docs/dyn/content_v2_1.accounts.html index cdf2ba7d3db..2b02034549f 100644 --- a/docs/dyn/content_v2_1.accounts.html +++ b/docs/dyn/content_v2_1.accounts.html @@ -225,7 +225,7 @@

Method Details

"url": "A String", # Customer service URL. }, "koreanBusinessRegistrationNumber": "A String", # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set. - "phoneNumber": "A String", # The phone number of the business. + "phoneNumber": "A String", # ! The phone number of the business. This can only be updated if a verified ! phone number is not already set. To replace a verified phone number use ! the `Accounts.requestphoneverification` and ! `Accounts.verifyphonenumber`. }, "cssId": "A String", # ID of CSS the account belongs to. "googleMyBusinessLink": { # The GMB account which is linked or in the process of being linked with the Merchant Center account. @@ -316,7 +316,7 @@

Method Details

"url": "A String", # Customer service URL. }, "koreanBusinessRegistrationNumber": "A String", # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set. - "phoneNumber": "A String", # The phone number of the business. + "phoneNumber": "A String", # ! The phone number of the business. This can only be updated if a verified ! phone number is not already set. To replace a verified phone number use ! the `Accounts.requestphoneverification` and ! `Accounts.verifyphonenumber`. }, "cssId": "A String", # ID of CSS the account belongs to. "googleMyBusinessLink": { # The GMB account which is linked or in the process of being linked with the Merchant Center account. @@ -427,7 +427,7 @@

Method Details

"url": "A String", # Customer service URL. }, "koreanBusinessRegistrationNumber": "A String", # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set. - "phoneNumber": "A String", # The phone number of the business. + "phoneNumber": "A String", # ! The phone number of the business. This can only be updated if a verified ! phone number is not already set. To replace a verified phone number use ! the `Accounts.requestphoneverification` and ! `Accounts.verifyphonenumber`. }, "cssId": "A String", # ID of CSS the account belongs to. "googleMyBusinessLink": { # The GMB account which is linked or in the process of being linked with the Merchant Center account. @@ -496,7 +496,7 @@

Method Details

"url": "A String", # Customer service URL. }, "koreanBusinessRegistrationNumber": "A String", # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set. - "phoneNumber": "A String", # The phone number of the business. + "phoneNumber": "A String", # ! The phone number of the business. This can only be updated if a verified ! phone number is not already set. To replace a verified phone number use ! the `Accounts.requestphoneverification` and ! `Accounts.verifyphonenumber`. }, "cssId": "A String", # ID of CSS the account belongs to. "googleMyBusinessLink": { # The GMB account which is linked or in the process of being linked with the Merchant Center account. @@ -563,7 +563,7 @@

Method Details

"url": "A String", # Customer service URL. }, "koreanBusinessRegistrationNumber": "A String", # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set. - "phoneNumber": "A String", # The phone number of the business. + "phoneNumber": "A String", # ! The phone number of the business. This can only be updated if a verified ! phone number is not already set. To replace a verified phone number use ! the `Accounts.requestphoneverification` and ! `Accounts.verifyphonenumber`. }, "cssId": "A String", # ID of CSS the account belongs to. "googleMyBusinessLink": { # The GMB account which is linked or in the process of being linked with the Merchant Center account. @@ -609,6 +609,9 @@

Method Details

{ "action": "A String", # Action to perform for this link. The `"request"` action is only available to select merchants. Acceptable values are: - "`approve`" - "`remove`" - "`request`" + "eCommercePlatformLinkInfo": { # Additional information required for E_COMMERCE_PLATFORM link type. # Additional information required for `eCommercePlatform` link type. + "externalAccountId": "A String", # The id used by the third party service provider to identify the merchant. + }, "linkType": "A String", # Type of the link between the two accounts. Acceptable values are: - "`channelPartner`" - "`eCommercePlatform`" - "`paymentServiceProvider`" "linkedAccountId": "A String", # The ID of the linked account. "paymentServiceProviderLinkInfo": { # Additional information required for PAYMENT_SERVICE_PROVIDER link type. # Additional information required for `paymentServiceProvider` link type. @@ -685,7 +688,7 @@

Method Details

"url": "A String", # Customer service URL. }, "koreanBusinessRegistrationNumber": "A String", # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set. - "phoneNumber": "A String", # The phone number of the business. + "phoneNumber": "A String", # ! The phone number of the business. This can only be updated if a verified ! phone number is not already set. To replace a verified phone number use ! the `Accounts.requestphoneverification` and ! `Accounts.verifyphonenumber`. }, "cssId": "A String", # ID of CSS the account belongs to. "googleMyBusinessLink": { # The GMB account which is linked or in the process of being linked with the Merchant Center account. @@ -849,7 +852,7 @@

Method Details

"url": "A String", # Customer service URL. }, "koreanBusinessRegistrationNumber": "A String", # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set. - "phoneNumber": "A String", # The phone number of the business. + "phoneNumber": "A String", # ! The phone number of the business. This can only be updated if a verified ! phone number is not already set. To replace a verified phone number use ! the `Accounts.requestphoneverification` and ! `Accounts.verifyphonenumber`. }, "cssId": "A String", # ID of CSS the account belongs to. "googleMyBusinessLink": { # The GMB account which is linked or in the process of being linked with the Merchant Center account. @@ -916,7 +919,7 @@

Method Details

"url": "A String", # Customer service URL. }, "koreanBusinessRegistrationNumber": "A String", # The 10-digit [Korean business registration number](https://support.google.com/merchants/answer/9037766) separated with dashes in the format: XXX-XX-XXXXX. This field will only be updated if explicitly set. - "phoneNumber": "A String", # The phone number of the business. + "phoneNumber": "A String", # ! The phone number of the business. This can only be updated if a verified ! phone number is not already set. To replace a verified phone number use ! the `Accounts.requestphoneverification` and ! `Accounts.verifyphonenumber`. }, "cssId": "A String", # ID of CSS the account belongs to. "googleMyBusinessLink": { # The GMB account which is linked or in the process of being linked with the Merchant Center account. diff --git a/docs/dyn/content_v2_1.localinventory.html b/docs/dyn/content_v2_1.localinventory.html index 6e38ec2e478..06b2d6f8aca 100644 --- a/docs/dyn/content_v2_1.localinventory.html +++ b/docs/dyn/content_v2_1.localinventory.html @@ -105,7 +105,7 @@

Method Details

"availability": "A String", # Availability of the product. For accepted attribute values, see the local product inventory feed specification. "instoreProductLocation": "A String", # In-store product location. "kind": "A String", # Identifies what kind of resource this is. Value: the fixed string "`content#localInventory`" - "pickupMethod": "A String", # Supported pickup method for this offer. Unless the value is "not supported", this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed // specification. + "pickupMethod": "A String", # Supported pickup method for this offer. Unless the value is "not supported", this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed specification. "pickupSla": "A String", # Expected date that an order will be ready for pickup relative to the order date. Must be submitted together with `pickupMethod`. For accepted attribute values, see the local product inventory feed specification. "price": { # Price of the product. "currency": "A String", # The currency of the price. @@ -170,7 +170,7 @@

Method Details

"availability": "A String", # Availability of the product. For accepted attribute values, see the local product inventory feed specification. "instoreProductLocation": "A String", # In-store product location. "kind": "A String", # Identifies what kind of resource this is. Value: the fixed string "`content#localInventory`" - "pickupMethod": "A String", # Supported pickup method for this offer. Unless the value is "not supported", this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed // specification. + "pickupMethod": "A String", # Supported pickup method for this offer. Unless the value is "not supported", this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed specification. "pickupSla": "A String", # Expected date that an order will be ready for pickup relative to the order date. Must be submitted together with `pickupMethod`. For accepted attribute values, see the local product inventory feed specification. "price": { # Price of the product. "currency": "A String", # The currency of the price. @@ -197,7 +197,7 @@

Method Details

"availability": "A String", # Availability of the product. For accepted attribute values, see the local product inventory feed specification. "instoreProductLocation": "A String", # In-store product location. "kind": "A String", # Identifies what kind of resource this is. Value: the fixed string "`content#localInventory`" - "pickupMethod": "A String", # Supported pickup method for this offer. Unless the value is "not supported", this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed // specification. + "pickupMethod": "A String", # Supported pickup method for this offer. Unless the value is "not supported", this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed specification. "pickupSla": "A String", # Expected date that an order will be ready for pickup relative to the order date. Must be submitted together with `pickupMethod`. For accepted attribute values, see the local product inventory feed specification. "price": { # Price of the product. "currency": "A String", # The currency of the price. diff --git a/docs/dyn/content_v2_1.orders.html b/docs/dyn/content_v2_1.orders.html index b36f13609f1..0b98ab12387 100644 --- a/docs/dyn/content_v2_1.orders.html +++ b/docs/dyn/content_v2_1.orders.html @@ -91,7 +91,7 @@

Instance Methods

Sandbox only. Cancels a test order for customer-initiated cancellation.

captureOrder(merchantId, orderId, body=None, x__xgafv=None)

-

Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processsor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped.

+

Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped. A rejected error code is returned when the payment service provider has declined the charge. This indicates a problem between the PSP and either the merchant's or customer's account. Sometimes this error will be resolved by the customer. We recommend retrying these errors once per day or cancelling the order with reason `failedToCaptureFunds` if the items cannot be held.

close()

Close httplib2 connections.

@@ -207,7 +207,7 @@

Method Details

{ "operationId": "A String", # The ID of the operation. Unique across all operations for a given order. - "reason": "A String", # The reason for the cancellation. Acceptable values are: - "`customerInitiatedCancel`" - "`invalidCoupon`" - "`malformedShippingAddress`" - "`noInventory`" - "`other`" - "`priceError`" - "`shippingPriceError`" - "`taxError`" - "`undeliverableShippingAddress`" - "`unsupportedPoBoxAddress`" + "reason": "A String", # The reason for the cancellation. Acceptable values are: - "`customerInitiatedCancel`" - "`invalidCoupon`" - "`malformedShippingAddress`" - "`noInventory`" - "`other`" - "`priceError`" - "`shippingPriceError`" - "`taxError`" - "`undeliverableShippingAddress`" - "`unsupportedPoBoxAddress`" - "`failedToCaptureFunds`" "reasonText": "A String", # The explanation of the reason. } @@ -240,7 +240,7 @@

Method Details

"operationId": "A String", # The ID of the operation. Unique across all operations for a given order. "productId": "A String", # The ID of the product to cancel. This is the REST ID used in the products service. Either lineItemId or productId is required. "quantity": 42, # The quantity to cancel. - "reason": "A String", # The reason for the cancellation. Acceptable values are: - "`customerInitiatedCancel`" - "`invalidCoupon`" - "`malformedShippingAddress`" - "`noInventory`" - "`other`" - "`priceError`" - "`shippingPriceError`" - "`taxError`" - "`undeliverableShippingAddress`" - "`unsupportedPoBoxAddress`" + "reason": "A String", # The reason for the cancellation. Acceptable values are: - "`customerInitiatedCancel`" - "`invalidCoupon`" - "`malformedShippingAddress`" - "`noInventory`" - "`other`" - "`priceError`" - "`shippingPriceError`" - "`taxError`" - "`undeliverableShippingAddress`" - "`unsupportedPoBoxAddress`" - "`failedToCaptureFunds`" "reasonText": "A String", # The explanation of the reason. } @@ -287,7 +287,7 @@

Method Details

captureOrder(merchantId, orderId, body=None, x__xgafv=None) -
Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processsor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped.
+  
Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped. A rejected error code is returned when the payment service provider has declined the charge. This indicates a problem between the PSP and either the merchant's or customer's account. Sometimes this error will be resolved by the customer. We recommend retrying these errors once per day or cancelling the order with reason `failedToCaptureFunds` if the items cannot be held.
 
 Args:
   merchantId: string, Required. The ID of the account that manages the order. This cannot be a multi-client account. (required)
@@ -617,7 +617,7 @@ 
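Putting the expanded `captureOrder` contract together with the new `failedToCaptureFunds` cancellation reason, a naive sketch of the documented fallback. IDs are placeholders, and real code would inspect the error payload and retry no more than once per day rather than cancelling on the first failure:

```python
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError

content = build("content", "v2.1")
merchant_id, order_id = "111111", "TEST-ORDER-123"  # placeholders

try:
    content.orders().captureOrder(
        merchantId=merchant_id, orderId=order_id
    ).execute()
except HttpError:
    # A rejected capture signals a problem between the PSP and the
    # merchant's or customer's account. If the items cannot be held,
    # cancel with the dedicated reason instead of retrying indefinitely.
    content.orders().cancel(
        merchantId=merchant_id,
        orderId=order_id,
        body={
            "operationId": "cancel-001",  # unique across ops for the order
            "reason": "failedToCaptureFunds",
            "reasonText": "Payment capture declined by the PSP.",
        },
    ).execute()
```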

Method Details

"actor": "A String", # The actor that created the cancellation. Acceptable values are: - "`customer`" - "`googleBot`" - "`googleCustomerService`" - "`googlePayments`" - "`googleSabre`" - "`merchant`" "creationDate": "A String", # Date on which the cancellation has been created, in ISO 8601 format. "quantity": 42, # The quantity that was canceled. - "reason": "A String", # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - "`autoPostInternal`" - "`autoPostInvalidBillingAddress`" - "`autoPostNoInventory`" - "`autoPostPriceError`" - "`autoPostUndeliverableShippingAddress`" - "`couponAbuse`" - "`customerCanceled`" - "`customerInitiatedCancel`" - "`customerSupportRequested`" - "`failToPushOrderGoogleError`" - "`failToPushOrderMerchantError`" - "`failToPushOrderMerchantFulfillmentError`" - "`failToPushOrderToMerchant`" - "`failToPushOrderToMerchantOutOfStock`" - "`invalidCoupon`" - "`malformedShippingAddress`" - "`merchantDidNotShipOnTime`" - "`noInventory`" - "`orderTimeout`" - "`other`" - "`paymentAbuse`" - "`paymentDeclined`" - "`priceError`" - "`returnRefundAbuse`" - "`shippingPriceError`" - "`taxError`" - "`undeliverableShippingAddress`" - "`unsupportedPoBoxAddress`" + "reason": "A String", # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - "`autoPostInternal`" - "`autoPostInvalidBillingAddress`" - "`autoPostNoInventory`" - "`autoPostPriceError`" - "`autoPostUndeliverableShippingAddress`" - "`couponAbuse`" - "`customerCanceled`" - "`customerInitiatedCancel`" - "`customerSupportRequested`" - "`failToPushOrderGoogleError`" - "`failToPushOrderMerchantError`" - "`failToPushOrderMerchantFulfillmentError`" - "`failToPushOrderToMerchant`" - "`failToPushOrderToMerchantOutOfStock`" - "`invalidCoupon`" - "`malformedShippingAddress`" - "`merchantDidNotShipOnTime`" - "`noInventory`" - "`orderTimeout`" - "`other`" - "`paymentAbuse`" - "`paymentDeclined`" - "`priceError`" - "`returnRefundAbuse`" - "`shippingPriceError`" - "`taxError`" - "`undeliverableShippingAddress`" - "`unsupportedPoBoxAddress`" - "`failedToCaptureFunds`" "reasonText": "A String", # The explanation of the reason. }, ], @@ -915,7 +915,7 @@

Method Details

"actor": "A String", # The actor that created the cancellation. Acceptable values are: - "`customer`" - "`googleBot`" - "`googleCustomerService`" - "`googlePayments`" - "`googleSabre`" - "`merchant`" "creationDate": "A String", # Date on which the cancellation has been created, in ISO 8601 format. "quantity": 42, # The quantity that was canceled. - "reason": "A String", # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - "`autoPostInternal`" - "`autoPostInvalidBillingAddress`" - "`autoPostNoInventory`" - "`autoPostPriceError`" - "`autoPostUndeliverableShippingAddress`" - "`couponAbuse`" - "`customerCanceled`" - "`customerInitiatedCancel`" - "`customerSupportRequested`" - "`failToPushOrderGoogleError`" - "`failToPushOrderMerchantError`" - "`failToPushOrderMerchantFulfillmentError`" - "`failToPushOrderToMerchant`" - "`failToPushOrderToMerchantOutOfStock`" - "`invalidCoupon`" - "`malformedShippingAddress`" - "`merchantDidNotShipOnTime`" - "`noInventory`" - "`orderTimeout`" - "`other`" - "`paymentAbuse`" - "`paymentDeclined`" - "`priceError`" - "`returnRefundAbuse`" - "`shippingPriceError`" - "`taxError`" - "`undeliverableShippingAddress`" - "`unsupportedPoBoxAddress`" + "reason": "A String", # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - "`autoPostInternal`" - "`autoPostInvalidBillingAddress`" - "`autoPostNoInventory`" - "`autoPostPriceError`" - "`autoPostUndeliverableShippingAddress`" - "`couponAbuse`" - "`customerCanceled`" - "`customerInitiatedCancel`" - "`customerSupportRequested`" - "`failToPushOrderGoogleError`" - "`failToPushOrderMerchantError`" - "`failToPushOrderMerchantFulfillmentError`" - "`failToPushOrderToMerchant`" - "`failToPushOrderToMerchantOutOfStock`" - "`invalidCoupon`" - "`malformedShippingAddress`" - "`merchantDidNotShipOnTime`" - "`noInventory`" - "`orderTimeout`" - "`other`" - "`paymentAbuse`" - "`paymentDeclined`" - "`priceError`" - "`returnRefundAbuse`" - "`shippingPriceError`" - "`taxError`" - "`undeliverableShippingAddress`" - "`unsupportedPoBoxAddress`" - "`failedToCaptureFunds`" "reasonText": "A String", # The explanation of the reason. }, ], @@ -1448,7 +1448,7 @@

Method Details

"actor": "A String", # The actor that created the cancellation. Acceptable values are: - "`customer`" - "`googleBot`" - "`googleCustomerService`" - "`googlePayments`" - "`googleSabre`" - "`merchant`" "creationDate": "A String", # Date on which the cancellation has been created, in ISO 8601 format. "quantity": 42, # The quantity that was canceled. - "reason": "A String", # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - "`autoPostInternal`" - "`autoPostInvalidBillingAddress`" - "`autoPostNoInventory`" - "`autoPostPriceError`" - "`autoPostUndeliverableShippingAddress`" - "`couponAbuse`" - "`customerCanceled`" - "`customerInitiatedCancel`" - "`customerSupportRequested`" - "`failToPushOrderGoogleError`" - "`failToPushOrderMerchantError`" - "`failToPushOrderMerchantFulfillmentError`" - "`failToPushOrderToMerchant`" - "`failToPushOrderToMerchantOutOfStock`" - "`invalidCoupon`" - "`malformedShippingAddress`" - "`merchantDidNotShipOnTime`" - "`noInventory`" - "`orderTimeout`" - "`other`" - "`paymentAbuse`" - "`paymentDeclined`" - "`priceError`" - "`returnRefundAbuse`" - "`shippingPriceError`" - "`taxError`" - "`undeliverableShippingAddress`" - "`unsupportedPoBoxAddress`" + "reason": "A String", # The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - "`autoPostInternal`" - "`autoPostInvalidBillingAddress`" - "`autoPostNoInventory`" - "`autoPostPriceError`" - "`autoPostUndeliverableShippingAddress`" - "`couponAbuse`" - "`customerCanceled`" - "`customerInitiatedCancel`" - "`customerSupportRequested`" - "`failToPushOrderGoogleError`" - "`failToPushOrderMerchantError`" - "`failToPushOrderMerchantFulfillmentError`" - "`failToPushOrderToMerchant`" - "`failToPushOrderToMerchantOutOfStock`" - "`invalidCoupon`" - "`malformedShippingAddress`" - "`merchantDidNotShipOnTime`" - "`noInventory`" - "`orderTimeout`" - "`other`" - "`paymentAbuse`" - "`paymentDeclined`" - "`priceError`" - "`returnRefundAbuse`" - "`shippingPriceError`" - "`taxError`" - "`undeliverableShippingAddress`" - "`unsupportedPoBoxAddress`" - "`failedToCaptureFunds`" "reasonText": "A String", # The explanation of the reason. }, ], diff --git a/docs/dyn/content_v2_1.promotions.html b/docs/dyn/content_v2_1.promotions.html index ea0a830e4b5..a1088f7c148 100644 --- a/docs/dyn/content_v2_1.promotions.html +++ b/docs/dyn/content_v2_1.promotions.html @@ -148,6 +148,12 @@

Method Details

"orderLimit": 42, # Order limit for the promotion. "percentOff": 42, # The percentage discount offered in the promotion. "productApplicability": "A String", # Required. Applicability of the promotion to either all products or only specific products. + "productType": [ # Product filter by product type for the promotion. + "A String", + ], + "productTypeExclusion": [ # Product filter by product type exclusion for the promotion. + "A String", + ], "promotionDestinationIds": [ # Destination ID for the promotion. "A String", ], @@ -224,6 +230,12 @@

Method Details

"orderLimit": 42, # Order limit for the promotion. "percentOff": 42, # The percentage discount offered in the promotion. "productApplicability": "A String", # Required. Applicability of the promotion to either all products or only specific products. + "productType": [ # Product filter by product type for the promotion. + "A String", + ], + "productTypeExclusion": [ # Product filter by product type exclusion for the promotion. + "A String", + ], "promotionDestinationIds": [ # Destination ID for the promotion. "A String", ], diff --git a/docs/dyn/datastore_v1.projects.html b/docs/dyn/datastore_v1.projects.html index 96acf9cb890..4f71a3fd441 100644 --- a/docs/dyn/datastore_v1.projects.html +++ b/docs/dyn/datastore_v1.projects.html @@ -248,40 +248,7 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "update": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to update. The entity must already exist. Must have a complete key path. @@ -299,40 +266,7 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "upsert": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to upsert. The entity may or may not already exist. The entity key's final path element may be incomplete. @@ -350,40 +284,7 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, }, @@ -602,40 +503,7 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -659,40 +527,7 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -790,7 +625,24 @@

Method Details

"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -829,7 +681,24 @@
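The expanded `entityValue` above also shows that an embedded entity may omit its key entirely. A hedged sketch of an embedded entity and of an array value, under the constraints quoted above (an `arrayValue` cannot nest another `arrayValue`, and a `Value` that sets `array_value` must not set `meaning` or `exclude_from_indexes`); all field contents are invented:

  # Sketch only; contents are illustrative.
  address_value = {
      "entityValue": {  # a keyless embedded entity, allowed for entity values
          "properties": {
              "city": {"stringValue": "Reykjavik"},
              "postalCode": {"stringValue": "101"},
          },
      },
  }
  tags_value = {
      "arrayValue": {  # order is preserved when exclude_from_indexes settings match
          "values": [
              {"stringValue": "fiction"},
              {"stringValue": "classic"},
          ],
      },
  }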

Method Details

"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -890,7 +759,24 @@

Method Details

"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -977,40 +863,7 @@
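The `blobValue` and `geoPointValue` constraints repeated through these hunks are easy to trip over; a short sketch, with an invented payload and coordinates:

  import base64

  # Sketch only. Blob values must be base64-encoded in JSON requests; indexed
  # blobs are capped at 1500 bytes, unindexed ones at 1,000,000 bytes.
  thumbnail_value = {
      "blobValue": base64.b64encode(b"\x89PNG...binary...").decode("ascii"),
      "excludeFromIndexes": True,
  }

  # Latitude must lie in [-90.0, +90.0] and longitude in [-180.0, +180.0] (WGS84).
  location_value = {
      "geoPointValue": {"latitude": 64.1466, "longitude": -21.9426},
  }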

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -1049,7 +902,24 @@

Method Details

"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. diff --git a/docs/dyn/datastore_v1beta3.projects.html b/docs/dyn/datastore_v1beta3.projects.html index 5bd9b3632f1..68130019c47 100644 --- a/docs/dyn/datastore_v1beta3.projects.html +++ b/docs/dyn/datastore_v1beta3.projects.html @@ -232,7 +232,40 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": # Object with schema name: Value + "a_key": { # A message that can hold any of the supported value types and associated metadata. + "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. + "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. + # Object with schema name: Value + ], + }, + "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. + "booleanValue": True or False, # A boolean value. + "doubleValue": 3.14, # A double value. + "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. + "nullValue": "A String", # A null value. + "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. + "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. + }, }, }, "update": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to update. The entity must already exist. Must have a complete key path. @@ -250,7 +283,40 @@
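For context, the mutation shapes documented in this file are sent through `projects().commit()`. A sketch, assuming application-default credentials and a placeholder project ID (`my-project` is not a real value):

  from googleapiclient.discovery import build

  service = build("datastore", "v1beta3")  # credentials discovery assumed

  body = {
      "mode": "NON_TRANSACTIONAL",  # a single mutation needs no explicit transaction
      "mutations": [
          {
              "upsert": {  # the final path element may be incomplete for upserts
                  "key": {
                      "partitionId": {"projectId": "my-project"},
                      "path": [{"kind": "Book", "name": "moby-dick"}],
                  },
                  "properties": {"title": {"stringValue": "Moby Dick"}},
              }
          }
      ],
  }
  response = service.projects().commit(projectId="my-project", body=body).execute()
  print(response.get("mutationResults"))

The same body shape applies to `insert` and `update`, subject to the key-completeness rules noted in their respective comments.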

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": # Object with schema name: Value + "a_key": { # A message that can hold any of the supported value types and associated metadata. + "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. + "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. + # Object with schema name: Value + ], + }, + "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. + "booleanValue": True or False, # A boolean value. + "doubleValue": 3.14, # A double value. + "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. + "nullValue": "A String", # A null value. + "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. + "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. + }, }, }, "upsert": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to upsert. The entity may or may not already exist. The entity key's final path element may be incomplete. @@ -268,7 +334,40 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": # Object with schema name: Value + "a_key": { # A message that can hold any of the supported value types and associated metadata. + "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. + "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. + # Object with schema name: Value + ], + }, + "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. + "booleanValue": True or False, # A boolean value. + "doubleValue": 3.14, # A double value. + "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. + "nullValue": "A String", # A null value. + "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. + "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. + }, }, }, }, @@ -381,7 +480,40 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": # Object with schema name: Value + "a_key": { # A message that can hold any of the supported value types and associated metadata. + "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. + "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. + # Object with schema name: Value + ], + }, + "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. + "booleanValue": True or False, # A boolean value. + "doubleValue": 3.14, # A double value. + "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. + "nullValue": "A String", # A null value. + "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. + "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. + }, }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -405,7 +537,40 @@

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": # Object with schema name: Value + "a_key": { # A message that can hold any of the supported value types and associated metadata. + "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. + "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. + # Object with schema name: Value + ], + }, + "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. + "booleanValue": True or False, # A boolean value. + "doubleValue": 3.14, # A double value. + "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. + "nullValue": "A String", # A null value. + "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. + "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. + }, }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -503,24 +668,7 @@

Method Details

"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. 
A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": # Object with schema name: Value - }, - }, + "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -559,24 +707,7 @@

Method Details

"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. 
A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": # Object with schema name: Value - }, - }, + "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -637,24 +768,7 @@

Method Details

"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. 
A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": # Object with schema name: Value - }, - }, + "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -741,7 +855,40 @@
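As a concrete illustration of the path rules above, a fully specified key for an entity with one ancestor looks like the sketch below (hypothetical kinds and IDs, not from this diff); every ancestor appears in the path, in order, ending with the entity itself:

    key = {
        "partitionId": {"projectId": "my-project", "namespaceId": ""},
        "path": [
            {"kind": "Customer", "name": "acme"},         # root entity
            {"kind": "Order", "id": "5644004762845184"},  # its child; at most
        ],                                                # 100 elements in total
    }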

Method Details

], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": # Object with schema name: Value + "a_key": { # A message that can hold any of the supported value types and associated metadata. + "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. + "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. + # Object with schema name: Value + ], + }, + "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. + "booleanValue": True or False, # A boolean value. + "doubleValue": 3.14, # A double value. + "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. + "nullValue": "A String", # A null value. + "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. + "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. + }, }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -780,24 +927,7 @@

Method Details

"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. 
A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": # Object with schema name: Value - }, - }, + "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. diff --git a/docs/dyn/dialogflow_v2beta1.projects.conversations.participants.html b/docs/dyn/dialogflow_v2beta1.projects.conversations.participants.html index 1c848046b2b..8a3755210c6 100644 --- a/docs/dyn/dialogflow_v2beta1.projects.conversations.participants.html +++ b/docs/dyn/dialogflow_v2beta1.projects.conversations.participants.html @@ -1591,6 +1591,10 @@

Method Details

"payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, + "telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. + "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). + "sipUri": "A String", # Transfer the call to a SIP endpoint. + }, "text": { # The text response message. # Returns a text response. "text": [ # A collection of text responses. "A String", diff --git a/docs/dyn/dialogflow_v2beta1.projects.locations.conversations.participants.html b/docs/dyn/dialogflow_v2beta1.projects.locations.conversations.participants.html index 8f87ab7ad55..fbe74bbd954 100644 --- a/docs/dyn/dialogflow_v2beta1.projects.locations.conversations.participants.html +++ b/docs/dyn/dialogflow_v2beta1.projects.locations.conversations.participants.html @@ -1591,6 +1591,10 @@

Method Details

"payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, + "telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. + "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). + "sipUri": "A String", # Transfer the call to a SIP endpoint. + }, "text": { # The text response message. # Returns a text response. "text": [ # A collection of text responses. "A String", diff --git a/docs/dyn/dialogflow_v3.projects.locations.agents.environments.deployments.html b/docs/dyn/dialogflow_v3.projects.locations.agents.environments.deployments.html new file mode 100644 index 00000000000..ca466cb26b4 --- /dev/null +++ b/docs/dyn/dialogflow_v3.projects.locations.agents.environments.deployments.html @@ -0,0 +1,174 @@ + + + +

Dialogflow API . projects . locations . agents . environments . deployments

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Retrieves the specified Deployment.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Returns the list of all deployments in the specified Environment.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Retrieves the specified Deployment.
+
+Args:
+  name: string, Required. The name of the Deployment. Format: `projects//locations//agents//environments//deployments/`. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure pre-deployment steps to run, e.g. validation test cases or experiment auto-rollout.
+  "endTime": "A String", # End time of this deployment.
+  "flowVersion": "A String", # The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.
+  "name": "A String", # The name of the deployment. Format: projects//locations//agents//environments//deployments/.
+  "result": { # Result of the deployment. # Result of the deployment.
+    "deploymentTestResults": [ # Results of test cases running before the deployment. Format: `projects//locations//agents//testCases//results/`.
+      "A String",
+    ],
+    "experiment": "A String", # The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.
+  },
+  "startTime": "A String", # Start time of this deployment.
+  "state": "A String", # The current state of the deployment.
+}
+
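A minimal usage sketch for this method (the discovery build call, default credentials, and all resource IDs are assumptions, not part of the generated page):

    from googleapiclient.discovery import build

    service = build("dialogflow", "v3")  # uses application default credentials
    name = ("projects/my-project/locations/global/agents/my-agent/"
            "environments/my-env/deployments/my-deployment")
    deployment = (service.projects().locations().agents().environments()
                  .deployments().get(name=name).execute())
    print(deployment["state"], deployment.get("flowVersion"))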
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Returns the list of all deployments in the specified Environment.
+
+Args:
+  parent: string, Required. The Environment to list all deployments for. Format: `projects//locations//agents//environments/`. (required)
+  pageSize: integer, The maximum number of items to return in a single page. By default 20 and at most 100.
+  pageToken: string, The next_page_token value returned from a previous list request.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Deployments.ListDeployments.
+  "deployments": [ # The list of deployments. There will be a maximum number of items returned based on the page_size field in the request. The list may in some cases be empty or contain fewer entries than page_size even if this isn't the last page.
+    { # Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure pre-deployment steps to run, e.g. validation test cases or experiment auto-rollout.
+      "endTime": "A String", # End time of this deployment.
+      "flowVersion": "A String", # The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.
+      "name": "A String", # The name of the deployment. Format: projects//locations//agents//environments//deployments/.
+      "result": { # Result of the deployment. # Result of the deployment.
+        "deploymentTestResults": [ # Results of test cases running before the deployment. Format: `projects//locations//agents//testCases//results/`.
+          "A String",
+        ],
+        "experiment": "A String", # The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.
+      },
+      "startTime": "A String", # Start time of this deployment.
+      "state": "A String", # The current state of the deployment.
+    },
+  ],
+  "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list.
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
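Putting list() and list_next() together, the usual pagination loop for these generated clients looks roughly like this (project, agent, and environment IDs are placeholders):

    from googleapiclient.discovery import build

    service = build("dialogflow", "v3")
    parent = ("projects/my-project/locations/global/agents/my-agent/"
              "environments/my-env")
    deployments = (service.projects().locations().agents()
                   .environments().deployments())
    request = deployments.list(parent=parent, pageSize=20)
    while request is not None:
        response = request.execute()
        for deployment in response.get("deployments", []):
            print(deployment["name"], deployment["state"])
        request = deployments.list_next(request, response)  # None on last page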
+ + \ No newline at end of file diff --git a/docs/dyn/dialogflow_v3.projects.locations.agents.environments.html b/docs/dyn/dialogflow_v3.projects.locations.agents.environments.html index 9b34fd345dc..89d53baf1f2 100644 --- a/docs/dyn/dialogflow_v3.projects.locations.agents.environments.html +++ b/docs/dyn/dialogflow_v3.projects.locations.agents.environments.html @@ -79,6 +79,11 @@

Instance Methods

Returns the continuousTestResults Resource.

+

+ deployments() +

+

Returns the deployments Resource.

+

experiments()

@@ -98,6 +103,9 @@

Instance Methods

delete(name, x__xgafv=None)

Deletes the specified Environment.

+

+ deployFlow(environment, body=None, x__xgafv=None)

+

Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse

get(name, x__xgafv=None)

Retrieves the specified Environment.

@@ -138,6 +146,13 @@

Method Details

"description": "A String", # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. "displayName": "A String", # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters. "name": "A String", # The name of the environment. Format: `projects//locations//agents//environments/`. + "testCasesConfig": { # The configuration for continuous tests. # The test cases config for continuous tests of this environment. + "enableContinuousRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day. + "enablePredeploymentRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false. + "testCases": [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/` + "A String", + ], + }, "updateTime": "A String", # Output only. Update time of this environment. "versionConfigs": [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned. { # Configuration for the version. @@ -193,6 +208,48 @@

Method Details

}
+
+ deployFlow(environment, body=None, x__xgafv=None) +
Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse
+
+Args:
+  environment: string, Required. The environment to deploy the flow to. Format: `projects//locations//agents//environments/`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Environments.DeployFlow.
+  "flowVersion": "A String", # Required. The flow version to deploy. Format: `projects//locations//agents// flows//versions/`.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
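Because deployFlow returns a long-running operation, a caller typically polls it until `done` is set. A simplified sketch (resource names are placeholders, and polling via operations().get() is an assumption based on the standard Operations service):

    import time
    from googleapiclient.discovery import build

    service = build("dialogflow", "v3")
    environment = ("projects/my-project/locations/global/agents/my-agent/"
                   "environments/my-env")
    body = {"flowVersion": "projects/my-project/locations/global/agents/"
                           "my-agent/flows/my-flow/versions/1"}
    op = (service.projects().locations().agents().environments()
          .deployFlow(environment=environment, body=body).execute())
    while not op.get("done"):
        time.sleep(5)  # simplistic fixed-interval polling
        op = (service.projects().locations().operations()
              .get(name=op["name"]).execute())
    if "error" in op:
        raise RuntimeError(op["error"].get("message"))
    print(op.get("response"))  # DeployFlowResponse on success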
+
get(name, x__xgafv=None)
Retrieves the specified Environment.
@@ -211,6 +268,13 @@ 

Method Details

"description": "A String", # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. "displayName": "A String", # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters. "name": "A String", # The name of the environment. Format: `projects//locations//agents//environments/`. + "testCasesConfig": { # The configuration for continuous tests. # The test cases config for continuous tests of this environment. + "enableContinuousRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day. + "enablePredeploymentRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false. + "testCases": [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/` + "A String", + ], + }, "updateTime": "A String", # Output only. Update time of this environment. "versionConfigs": [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned. { # Configuration for the version. @@ -242,6 +306,13 @@

Method Details

"description": "A String", # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. "displayName": "A String", # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters. "name": "A String", # The name of the environment. Format: `projects//locations//agents//environments/`. + "testCasesConfig": { # The configuration for continuous tests. # The test cases config for continuous tests of this environment. + "enableContinuousRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day. + "enablePredeploymentRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false. + "testCases": [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/` + "A String", + ], + }, "updateTime": "A String", # Output only. Update time of this environment. "versionConfigs": [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned. { # Configuration for the version. @@ -290,6 +361,13 @@

Method Details

"description": "A String", # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. "displayName": "A String", # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters. "name": "A String", # The name of the environment. Format: `projects//locations//agents//environments/`. + "testCasesConfig": { # The configuration for continuous tests. # The test cases config for continuous tests of this environment. + "enableContinuousRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day. + "enablePredeploymentRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false. + "testCases": [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/` + "A String", + ], + }, "updateTime": "A String", # Output only. Update time of this environment. "versionConfigs": [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned. { # Configuration for the version. @@ -329,6 +407,13 @@

Method Details

"description": "A String", # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. "displayName": "A String", # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters. "name": "A String", # The name of the environment. Format: `projects//locations//agents//environments/`. + "testCasesConfig": { # The configuration for continuous tests. # The test cases config for continuous tests of this environment. + "enableContinuousRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day. + "enablePredeploymentRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false. + "testCases": [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/` + "A String", + ], + }, "updateTime": "A String", # Output only. Update time of this environment. "versionConfigs": [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned. { # Configuration for the version. diff --git a/docs/dyn/dialogflow_v3.projects.locations.securitySettings.html b/docs/dyn/dialogflow_v3.projects.locations.securitySettings.html index 21c86877059..383f9fa70e8 100644 --- a/docs/dyn/dialogflow_v3.projects.locations.securitySettings.html +++ b/docs/dyn/dialogflow_v3.projects.locations.securitySettings.html @@ -117,7 +117,7 @@

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], @@ -141,7 +141,7 @@

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], @@ -190,7 +190,7 @@

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], @@ -226,7 +226,7 @@

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], @@ -257,7 +257,7 @@

Method Details

Updates the specified SecuritySettings.
 
 Args:
-  name: string, Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. (required)
+  name: string, Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. (required)
   body: object, The request body.
     The object takes the form of:
 
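A minimal usage sketch for this patch method with the google-api-python-client. The project, location, and settings IDs below are hypothetical placeholders, and the body populates only the fields named in updateMask:

    from googleapiclient.discovery import build

    service = build("dialogflow", "v3")

    # The full resource name is required for update; create populates it
    # automatically on the server side.
    name = "projects/my-project/locations/global/securitySettings/my-settings"

    updated = service.projects().locations().securitySettings().patch(
        name=name,
        updateMask="displayName,purgeDataTypes",
        body={
            "name": name,
            "displayName": "updated-security-settings",
            "purgeDataTypes": ["DIALOGFLOW_HISTORY"],
        },
    ).execute()
    print(updated["name"])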
@@ -268,7 +268,7 @@ 

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], @@ -293,7 +293,7 @@

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], diff --git a/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.deployments.html b/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.deployments.html new file mode 100644 index 00000000000..0c253617a41 --- /dev/null +++ b/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.deployments.html @@ -0,0 +1,174 @@ + + + +

Dialogflow API . projects . locations . agents . environments . deployments

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Retrieves the specified Deployment.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Returns the list of all deployments in the specified Environment.

+

+ list_next(previous_request, previous_response)

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Retrieves the specified Deployment.
+
+Args:
+  name: string, Required. The name of the Deployment. Format: `projects//locations//agents//environments//deployments/`. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure pre-deployment steps to run, e.g. validation test cases, experiment auto-rollout, etc.
+  "endTime": "A String", # End time of this deployment.
+  "flowVersion": "A String", # The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.
+  "name": "A String", # The name of the deployment. Format: projects//locations//agents//environments//deployments/.
+  "result": { # Result of the deployment. # Result of the deployment.
+    "deploymentTestResults": [ # Results of test cases running before the deployment. Format: `projects//locations//agents//testCases//results/`.
+      "A String",
+    ],
+    "experiment": "A String", # The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.
+  },
+  "startTime": "A String", # Start time of this deployment.
+  "state": "A String", # The current state of the deployment.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Returns the list of all deployments in the specified Environment.
+
+Args:
+  parent: string, Required. The Environment to list all deployments for. Format: `projects//locations//agents//environments/`. (required)
+  pageSize: integer, The maximum number of items to return in a single page. By default 20 and at most 100.
+  pageToken: string, The next_page_token value returned from a previous list request.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Deployments.ListDeployments.
+  "deployments": [ # The list of deployments. There will be a maximum number of items returned based on the page_size field in the request. The list may in some cases be empty or contain fewer entries than page_size even if this isn't the last page.
+    { # Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure pre-deployment steps to run, e.g. validation test cases, experiment auto-rollout, etc.
+      "endTime": "A String", # End time of this deployment.
+      "flowVersion": "A String", # The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.
+      "name": "A String", # The name of the deployment. Format: projects//locations//agents//environments//deployments/.
+      "result": { # Result of the deployment. # Result of the deployment.
+        "deploymentTestResults": [ # Results of test cases running before the deployment. Format: `projects//locations//agents//testCases//results/`.
+          "A String",
+        ],
+        "experiment": "A String", # The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.
+      },
+      "startTime": "A String", # Start time of this deployment.
+      "state": "A String", # The current state of the deployment.
+    },
+  ],
+  "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list.
+}
+
+ +
+ list_next(previous_request, previous_response) +
Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    
+
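A minimal pagination sketch for the list and list_next pair above, using the google-api-python-client with placeholder project, agent, and environment IDs:

    from googleapiclient.discovery import build

    service = build("dialogflow", "v3beta1")
    parent = ("projects/my-project/locations/global/"
              "agents/my-agent/environments/my-env")

    deployments = (service.projects().locations().agents()
                   .environments().deployments())

    # Walk every page; list_next returns None when the pages run out.
    request = deployments.list(parent=parent, pageSize=20)
    while request is not None:
        response = request.execute()
        for deployment in response.get("deployments", []):
            print(deployment["name"], deployment.get("state"))
        request = deployments.list_next(request, response)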
+ + \ No newline at end of file diff --git a/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.html b/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.html index 3b30c6cc5b4..83b03ac4988 100644 --- a/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.html +++ b/docs/dyn/dialogflow_v3beta1.projects.locations.agents.environments.html @@ -79,6 +79,11 @@

Instance Methods

Returns the continuousTestResults Resource.

+

+ deployments() +

+

Returns the deployments Resource.

+

experiments()

@@ -98,6 +103,9 @@

Instance Methods

delete(name, x__xgafv=None)

Deletes the specified Environment.

+

+ deployFlow(environment, body=None, x__xgafv=None)

+

Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse

get(name, x__xgafv=None)

Retrieves the specified Environment.

@@ -138,6 +146,13 @@

Method Details

"description": "A String", # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. "displayName": "A String", # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters. "name": "A String", # The name of the environment. Format: `projects//locations//agents//environments/`. + "testCasesConfig": { # The configuration for continuous tests. # The test cases config for continuous tests of this environment. + "enableContinuousRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day. + "enablePredeploymentRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false. + "testCases": [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/` + "A String", + ], + }, "updateTime": "A String", # Output only. Update time of this environment. "versionConfigs": [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned. { # Configuration for the version. @@ -193,6 +208,48 @@

Method Details

}
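To make the new testCasesConfig concrete, here is a hedged sketch of creating an environment with continuous tests enabled; the agent, flow, version, and test case names are placeholders:

    from googleapiclient.discovery import build

    service = build("dialogflow", "v3beta1")
    agent = "projects/my-project/locations/global/agents/my-agent"

    body = {
        "displayName": "staging",
        "versionConfigs": [
            # One entry per flow reachable from Start Flow.
            {"version": agent + "/flows/my-flow/versions/1"},
        ],
        "testCasesConfig": {
            "enableContinuousRun": True,     # run the listed cases once a day
            "enablePredeploymentRun": True,  # also run before each deployment
            "testCases": [agent + "/testCases/my-test-case"],
        },
    }

    # create returns a long-running Operation.
    operation = (service.projects().locations().agents().environments()
                 .create(parent=agent, body=body).execute())
    print(operation["name"])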
+
+ deployFlow(environment, body=None, x__xgafv=None) +
Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse
+
+Args:
+  environment: string, Required. The environment to deploy the flow to. Format: `projects//locations//agents//environments/`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Environments.DeployFlow.
+  "flowVersion": "A String", # Required. The flow version to deploy. Format: `projects//locations//agents// flows//versions/`.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
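A sketch of calling deployFlow and waiting on the returned long-running Operation, assuming the standard projects.locations.operations.get polling pattern and placeholder resource names:

    import time
    from googleapiclient.discovery import build

    service = build("dialogflow", "v3beta1")
    environment = ("projects/my-project/locations/global/"
                   "agents/my-agent/environments/my-env")
    flow_version = ("projects/my-project/locations/global/"
                    "agents/my-agent/flows/my-flow/versions/1")

    # Start the deployment; the immediate response is an Operation.
    operation = (service.projects().locations().agents().environments()
                 .deployFlow(environment=environment,
                             body={"flowVersion": flow_version})
                 .execute())

    # Poll until done, then surface either the error or the response.
    while not operation.get("done"):
        time.sleep(5)
        operation = (service.projects().locations().operations()
                     .get(name=operation["name"]).execute())
    if "error" in operation:
        raise RuntimeError(operation["error"].get("message"))
    print(operation.get("response"))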
+
get(name, x__xgafv=None)
Retrieves the specified Environment.
@@ -211,6 +268,13 @@ 

Method Details

"description": "A String", # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. "displayName": "A String", # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters. "name": "A String", # The name of the environment. Format: `projects//locations//agents//environments/`. + "testCasesConfig": { # The configuration for continuous tests. # The test cases config for continuous tests of this environment. + "enableContinuousRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day. + "enablePredeploymentRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false. + "testCases": [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/` + "A String", + ], + }, "updateTime": "A String", # Output only. Update time of this environment. "versionConfigs": [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned. { # Configuration for the version. @@ -242,6 +306,13 @@

Method Details

"description": "A String", # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. "displayName": "A String", # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters. "name": "A String", # The name of the environment. Format: `projects//locations//agents//environments/`. + "testCasesConfig": { # The configuration for continuous tests. # The test cases config for continuous tests of this environment. + "enableContinuousRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day. + "enablePredeploymentRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false. + "testCases": [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/` + "A String", + ], + }, "updateTime": "A String", # Output only. Update time of this environment. "versionConfigs": [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned. { # Configuration for the version. @@ -290,6 +361,13 @@

Method Details

"description": "A String", # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. "displayName": "A String", # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters. "name": "A String", # The name of the environment. Format: `projects//locations//agents//environments/`. + "testCasesConfig": { # The configuration for continuous tests. # The test cases config for continuous tests of this environment. + "enableContinuousRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day. + "enablePredeploymentRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false. + "testCases": [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/` + "A String", + ], + }, "updateTime": "A String", # Output only. Update time of this environment. "versionConfigs": [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned. { # Configuration for the version. @@ -329,6 +407,13 @@

Method Details

"description": "A String", # The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. "displayName": "A String", # Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters. "name": "A String", # The name of the environment. Format: `projects//locations//agents//environments/`. + "testCasesConfig": { # The configuration for continuous tests. # The test cases config for continuous tests of this environment. + "enableContinuousRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day. + "enablePredeploymentRun": True or False, # Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false. + "testCases": [ # A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/` + "A String", + ], + }, "updateTime": "A String", # Output only. Update time of this environment. "versionConfigs": [ # Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned. { # Configuration for the version. diff --git a/docs/dyn/dialogflow_v3beta1.projects.locations.securitySettings.html b/docs/dyn/dialogflow_v3beta1.projects.locations.securitySettings.html index be27c70e5cf..b59f19d1118 100644 --- a/docs/dyn/dialogflow_v3beta1.projects.locations.securitySettings.html +++ b/docs/dyn/dialogflow_v3beta1.projects.locations.securitySettings.html @@ -117,7 +117,7 @@

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], @@ -141,7 +141,7 @@

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], @@ -190,7 +190,7 @@

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], @@ -226,7 +226,7 @@

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], @@ -257,7 +257,7 @@

Method Details

Updates the specified SecuritySettings.
 
 Args:
-  name: string, Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. (required)
+  name: string, Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -268,7 +268,7 @@ 

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], @@ -293,7 +293,7 @@

Method Details

"enableInsightsExport": True or False, # If enabled, we will automatically exports conversations to Insights and Insights runs its analyzers. }, "inspectTemplate": "A String", # [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. The template name will have one of the following formats: `projects//locations//inspectTemplates/` OR `organizations//locations//inspectTemplates/` Note: `inspect_template` must be located in the same region as the `SecuritySettings`. - "name": "A String", # Required. Resource name of the settings. Format: `projects//locations//securitySettings/`. + "name": "A String", # Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`. "purgeDataTypes": [ # List of types of data to remove when retention settings triggers purge. "A String", ], diff --git a/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html b/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html index 5a73e624697..fd8849ee484 100644 --- a/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html +++ b/docs/dyn/documentai_v1.projects.locations.processors.processorVersions.html @@ -281,7 +281,7 @@

Method Details

"displayName": "A String", # Display name to show to users. "entityTypes": [ # Entity types of the schema. { # EntityType is the wrapper of a label of the corresponding model with detailed attributes and limitations for entity-based processors. Multiple types can also compose a dependency tree to represent nested types. - "baseType": "A String", # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. + "baseType": "A String", # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration. "description": "A String", # Description of the entity type. "enumValues": [ # If specified, lists all the possible values for this entity. "A String", @@ -342,7 +342,7 @@

Method Details

"displayName": "A String", # Display name to show to users. "entityTypes": [ # Entity types of the schema. { # EntityType is the wrapper of a label of the corresponding model with detailed attributes and limitations for entity-based processors. Multiple types can also compose a dependency tree to represent nested types. - "baseType": "A String", # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. + "baseType": "A String", # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration. "description": "A String", # Description of the entity type. "enumValues": [ # If specified, lists all the possible values for this entity. "A String", diff --git a/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html b/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html index 3a7811983b2..251c172be12 100644 --- a/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html +++ b/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html @@ -290,7 +290,7 @@

Method Details

"displayName": "A String", # Display name to show to users. "entityTypes": [ # Entity types of the schema. { # EntityType is the wrapper of a label of the corresponding model with detailed attributes and limitations for entity-based processors. Multiple types can also compose a dependency tree to represent nested types. - "baseType": "A String", # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. + "baseType": "A String", # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration. "description": "A String", # Description of the entity type. "enumValues": [ # If specified, lists all the possible values for this entity. "A String", @@ -351,7 +351,7 @@

Method Details

"displayName": "A String", # Display name to show to users. "entityTypes": [ # Entity types of the schema. { # EntityType is the wrapper of a label of the corresponding model with detailed attributes and limitations for entity-based processors. Multiple types can also compose a dependency tree to represent nested types. - "baseType": "A String", # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. + "baseType": "A String", # Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration. "description": "A String", # Description of the entity type. "enumValues": [ # If specified, lists all the possible values for this entity. "A String", diff --git a/docs/dyn/drive_v2.files.html b/docs/dyn/drive_v2.files.html index 0223f2edb32..02529bd0fe4 100644 --- a/docs/dyn/drive_v2.files.html +++ b/docs/dyn/drive_v2.files.html @@ -112,7 +112,7 @@

Instance Methods

Retrieves the next page of results.

patch(fileId, addParents=None, body=None, convert=None, enforceSingleParent=None, includePermissionsForView=None, modifiedDateBehavior=None, newRevision=None, ocr=None, ocrLanguage=None, pinned=None, removeParents=None, setModifiedDate=None, supportsAllDrives=None, supportsTeamDrives=None, timedTextLanguage=None, timedTextTrackName=None, updateViewedDate=None, useContentAsIndexableText=None)

-

Updates file metadata and/or content. This method supports patch semantics.

+

Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might change automatically, such as modifiedDate. This method supports patch semantics.

touch(fileId, includePermissionsForView=None, supportsAllDrives=None, supportsTeamDrives=None)

Set the file's updated time to the current server time.

@@ -124,7 +124,7 @@

Instance Methods

Restores a file from the trash. The currently authenticated user must own the file or be at least a fileOrganizer on the parent for shared drive files. Only the owner may untrash a file.

update(fileId, addParents=None, body=None, convert=None, enforceSingleParent=None, includePermissionsForView=None, media_body=None, media_mime_type=None, modifiedDateBehavior=None, newRevision=None, ocr=None, ocrLanguage=None, pinned=None, removeParents=None, setModifiedDate=None, supportsAllDrives=None, supportsTeamDrives=None, timedTextLanguage=None, timedTextTrackName=None, updateViewedDate=None, useContentAsIndexableText=None)

-

Updates file metadata and/or content.

+

Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might be changed automatically, such as modifiedDate. This method supports patch semantics.

watch(fileId, acknowledgeAbuse=None, body=None, includePermissionsForView=None, projection=None, revisionId=None, supportsAllDrives=None, supportsTeamDrives=None, updateViewedDate=None)

Subscribe to changes on a file

@@ -2602,7 +2602,7 @@

Method Details

patch(fileId, addParents=None, body=None, convert=None, enforceSingleParent=None, includePermissionsForView=None, modifiedDateBehavior=None, newRevision=None, ocr=None, ocrLanguage=None, pinned=None, removeParents=None, setModifiedDate=None, supportsAllDrives=None, supportsTeamDrives=None, timedTextLanguage=None, timedTextTrackName=None, updateViewedDate=None, useContentAsIndexableText=None) -
Updates file metadata and/or content. This method supports patch semantics.
+  
Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might change automatically, such as modifiedDate. This method supports patch semantics.
 
 Args:
   fileId: string, The ID of the file to update. (required)
@@ -4552,7 +4552,7 @@ 

Method Details

update(fileId, addParents=None, body=None, convert=None, enforceSingleParent=None, includePermissionsForView=None, media_body=None, media_mime_type=None, modifiedDateBehavior=None, newRevision=None, ocr=None, ocrLanguage=None, pinned=None, removeParents=None, setModifiedDate=None, supportsAllDrives=None, supportsTeamDrives=None, timedTextLanguage=None, timedTextTrackName=None, updateViewedDate=None, useContentAsIndexableText=None) -
Updates file metadata and/or content.
+  
Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might be changed automatically, such as modifiedDate. This method supports patch semantics.
 
 Args:
   fileId: string, The ID of the file to update. (required)
diff --git a/docs/dyn/drive_v3.files.html b/docs/dyn/drive_v3.files.html
index ea498eafba8..0df0e83845f 100644
--- a/docs/dyn/drive_v3.files.html
+++ b/docs/dyn/drive_v3.files.html
@@ -112,7 +112,7 @@ 

Instance Methods

Retrieves the next page of results.

update(fileId, addParents=None, body=None, enforceSingleParent=None, includePermissionsForView=None, keepRevisionForever=None, media_body=None, media_mime_type=None, ocrLanguage=None, removeParents=None, supportsAllDrives=None, supportsTeamDrives=None, useContentAsIndexableText=None)

-

Updates a file's metadata and/or content. This method supports patch semantics.

+

Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might change automatically, such as modifiedDate. This method supports patch semantics.

watch(fileId, acknowledgeAbuse=None, body=None, includePermissionsForView=None, supportsAllDrives=None, supportsTeamDrives=None)

Subscribes to changes to a file

@@ -1879,7 +1879,7 @@

Method Details

update(fileId, addParents=None, body=None, enforceSingleParent=None, includePermissionsForView=None, keepRevisionForever=None, media_body=None, media_mime_type=None, ocrLanguage=None, removeParents=None, supportsAllDrives=None, supportsTeamDrives=None, useContentAsIndexableText=None) -
Updates a file's metadata and/or content. This method supports patch semantics.
+  
Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might change automatically, such as modifiedDate. This method supports patch semantics.
 
 Args:
   fileId: string, The ID of the file. (required)
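A short sketch of the patch semantics described above, with a placeholder file ID: only the fields present in the request body are modified, and server-managed fields are updated by Drive itself.

    from googleapiclient.discovery import build

    service = build("drive", "v3")

    # Send only the fields to change; everything else, including the
    # server-managed modified timestamp, is left to Drive.
    updated = service.files().update(
        fileId="your-file-id",
        body={"description": "Refreshed by the nightly job"},
    ).execute()
    print(updated["id"], updated.get("name"))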
diff --git a/docs/dyn/firestore_v1.projects.databases.html b/docs/dyn/firestore_v1.projects.databases.html
index 3e7fa3fc3d6..3ee210ed01c 100644
--- a/docs/dyn/firestore_v1.projects.databases.html
+++ b/docs/dyn/firestore_v1.projects.databases.html
@@ -95,9 +95,18 @@ 

Instance Methods

exportDocuments(name, body=None, x__xgafv=None)

Exports a copy of all or a subset of documents from Google Cloud Firestore to another storage system, such as Google Cloud Storage. Recent updates to documents may not be reflected in the export. The export occurs in the background and its progress can be monitored and managed via the Operation resource that is created. The output of an export may only be used once the associated operation is done. If an export operation is cancelled before completion it may leave partial data behind in Google Cloud Storage. For more details on export behavior and output format, refer to: https://cloud.google.com/firestore/docs/manage-data/export-import

+

+ get(name, x__xgafv=None)

+

Gets information about a database.

importDocuments(name, body=None, x__xgafv=None)

Imports documents into Google Cloud Firestore. Existing documents with the same name are overwritten. The import occurs in the background and its progress can be monitored and managed via the Operation resource that is created. If an ImportDocuments operation is cancelled, it is possible that a subset of the data has already been imported to Cloud Firestore.

+

+ list(parent, x__xgafv=None)

+

List all the databases in the project.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Updates a database.

Method Details

close() @@ -149,6 +158,29 @@

Method Details

}
+
+ get(name, x__xgafv=None) +
Gets information about a database.
+
+Args:
+  name: string, Required. A name of the form `projects/{project_id}/databases/{database_id}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A Cloud Firestore Database in Native Mode. Currently one database is allowed per cloud project. It is named '(default)'.
+  "concurrencyMode": "A String", # The concurrency control mode to use for this database.
+  "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "locationId": "A String", # The location of the database. Available databases are listed at https://cloud.google.com/firestore/docs/locations.
+  "name": "A String", # The resource name of the Database. Format: `projects/{project}/databases/{database}`
+  "type": "A String", # The type of the database. See https://cloud.google.com/datastore/docs/firestore-or-datastore for information about how to choose.
+}
+
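A minimal sketch of fetching the database record, assuming the single '(default)' database and a placeholder project ID:

    from googleapiclient.discovery import build

    service = build("firestore", "v1")

    # Read metadata for the project's '(default)' database.
    db = service.projects().databases().get(
        name="projects/my-project/databases/(default)"
    ).execute()
    print(db["locationId"], db["type"], db.get("concurrencyMode"))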
+
importDocuments(name, body=None, x__xgafv=None)
Imports documents into Google Cloud Firestore. Existing documents with the same name are overwritten. The import occurs in the background and its progress can be monitored and managed via the Operation resource that is created. If an ImportDocuments operation is cancelled, it is possible that a subset of the data has already been imported to Cloud Firestore.
@@ -194,4 +226,78 @@ 

Method Details

}
+
+ list(parent, x__xgafv=None) +
List all the databases in the project.
+
+Args:
+  parent: string, Required. A parent name of the form `projects/{project_id}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The list of databases for a project.
+  "databases": [ # The databases in the project.
+    { # A Cloud Firestore Database in Native Mode. Currently one database is allowed per cloud project. It is named '(default)'.
+      "concurrencyMode": "A String", # The concurrency control mode to use for this database.
+      "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+      "locationId": "A String", # The location of the database. Available databases are listed at https://cloud.google.com/firestore/docs/locations.
+      "name": "A String", # The resource name of the Database. Format: `projects/{project}/databases/{database}`
+      "type": "A String", # The type of the database. See https://cloud.google.com/datastore/docs/firestore-or-datastore for information about how to choose.
+    },
+  ],
+}
+
+ +
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Updates a database.
+
+Args:
+  name: string, The resource name of the Database. Format: `projects/{project}/databases/{database}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A Cloud Firestore Database in Native Mode. Currently one database is allowed per cloud project. It is named '(default)'.
+  "concurrencyMode": "A String", # The concurrency control mode to use for this database.
+  "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "locationId": "A String", # The location of the database. Available databases are listed at https://cloud.google.com/firestore/docs/locations.
+  "name": "A String", # The resource name of the Database. Format: `projects/{project}/databases/{database}`
+  "type": "A String", # The type of the database. See https://cloud.google.com/datastore/docs/firestore-or-datastore for information about how to choose.
+}
+
+  updateMask: string, The list of fields to be updated.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
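And a hedged sketch of the new patch method. The concurrency mode value below is an assumed enum spelling, and as shown above the call returns a long-running Operation rather than the Database itself:

    from googleapiclient.discovery import build

    service = build("firestore", "v1")
    name = "projects/my-project/databases/(default)"

    operation = service.projects().databases().patch(
        name=name,
        updateMask="concurrencyMode",
        body={"name": name, "concurrencyMode": "OPTIMISTIC"},  # assumed enum value
    ).execute()
    print(operation["name"], operation.get("done", False))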
+ \ No newline at end of file diff --git a/docs/dyn/fitness_v1.users.dataset.html b/docs/dyn/fitness_v1.users.dataset.html index ff83b61121c..77dd482437a 100644 --- a/docs/dyn/fitness_v1.users.dataset.html +++ b/docs/dyn/fitness_v1.users.dataset.html @@ -116,7 +116,7 @@

Method Details

"value": 42, }, }, - "endTimeMillis": "A String", # The end of a window of time. Data that intersects with this time window will be aggregated. The time is in milliseconds since epoch, inclusive. + "endTimeMillis": "A String", # The end of a window of time. Data that intersects with this time window will be aggregated. The time is in milliseconds since epoch, inclusive. The maximum allowed difference between start_time_millis // and end_time_millis is 7776000000 (roughly 90 days). "filteredDataQualityStandard": [ # DO NOT POPULATE THIS FIELD. It is ignored. "A String", ], diff --git a/docs/dyn/gameservices_v1.projects.locations.gameServerDeployments.html b/docs/dyn/gameservices_v1.projects.locations.gameServerDeployments.html index 8c5d9e58d3a..1159eff2d47 100644 --- a/docs/dyn/gameservices_v1.projects.locations.gameServerDeployments.html +++ b/docs/dyn/gameservices_v1.projects.locations.gameServerDeployments.html @@ -364,7 +364,7 @@

Method Details

"in": [ # If one or more 'in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. "A String", ], - "logConfig": [ # The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action. + "logConfig": [ # The config returned to callers of CheckPolicy for any entries that match the LOG action. { # Specifies what kind of log the caller must write "cloudAudit": { # Write a Cloud Audit log # Cloud audit options. "authorizationLoggingOptions": { # Authorization-related information used by Cloud Audit Logging. # Information used by the Cloud Audit Logging pipeline. @@ -668,7 +668,7 @@

Method Details

"in": [ # If one or more 'in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. "A String", ], - "logConfig": [ # The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action. + "logConfig": [ # The config returned to callers of CheckPolicy for any entries that match the LOG action. { # Specifies what kind of log the caller must write "cloudAudit": { # Write a Cloud Audit log # Cloud audit options. "authorizationLoggingOptions": { # Authorization-related information used by Cloud Audit Logging. # Information used by the Cloud Audit Logging pipeline. @@ -765,7 +765,7 @@

Method Details

"in": [ # If one or more 'in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. "A String", ], - "logConfig": [ # The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action. + "logConfig": [ # The config returned to callers of CheckPolicy for any entries that match the LOG action. { # Specifies what kind of log the caller must write "cloudAudit": { # Write a Cloud Audit log # Cloud audit options. "authorizationLoggingOptions": { # Authorization-related information used by Cloud Audit Logging. # Information used by the Cloud Audit Logging pipeline. diff --git a/docs/dyn/gkehub_v1.projects.locations.features.html b/docs/dyn/gkehub_v1.projects.locations.features.html index 4309d22df84..89b39055e3f 100644 --- a/docs/dyn/gkehub_v1.projects.locations.features.html +++ b/docs/dyn/gkehub_v1.projects.locations.features.html @@ -132,13 +132,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -166,6 +179,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -200,6 +214,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -223,13 +238,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -389,13 +417,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -423,6 +464,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -457,6 +499,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -480,13 +523,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -634,13 +690,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -668,6 +737,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -702,6 +772,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -725,13 +796,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -835,13 +919,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -869,6 +966,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -903,6 +1001,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -926,13 +1025,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. diff --git a/docs/dyn/gkehub_v1alpha.projects.locations.features.html b/docs/dyn/gkehub_v1alpha.projects.locations.features.html index 1be2f78ec1a..a732ed28064 100644 --- a/docs/dyn/gkehub_v1alpha.projects.locations.features.html +++ b/docs/dyn/gkehub_v1alpha.projects.locations.features.html @@ -135,13 +135,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -197,6 +210,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -231,6 +245,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -257,13 +272,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -506,13 +534,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -568,6 +609,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -602,6 +644,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -628,13 +671,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -865,13 +921,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -927,6 +996,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -961,6 +1031,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -987,13 +1058,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -1180,13 +1264,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -1242,6 +1339,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -1276,6 +1374,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -1302,13 +1401,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. diff --git a/docs/dyn/gkehub_v1beta.projects.locations.features.html b/docs/dyn/gkehub_v1beta.projects.locations.features.html index 7fe99b66fd2..73fe6474123 100644 --- a/docs/dyn/gkehub_v1beta.projects.locations.features.html +++ b/docs/dyn/gkehub_v1beta.projects.locations.features.html @@ -135,13 +135,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -196,6 +209,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -230,6 +244,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -256,13 +271,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -456,13 +484,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -517,6 +558,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -551,6 +593,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -577,13 +620,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -765,13 +821,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -826,6 +895,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -860,6 +930,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -886,13 +957,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -1030,13 +1114,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. @@ -1091,6 +1188,7 @@

Method Details

"clusterName": "A String", # The user-defined name for the cluster used by ClusterSelectors to group clusters together. This should match Membership's membership_name, unless the user installed ACM on the cluster manually prior to enabling the ACM hub feature. Unique within a Anthos Config Management installation. "configSyncState": { # State information for ConfigSync # Current sync status "deploymentState": { # The state of ConfigSync's deployment on a cluster # Information about the deployment of ConfigSync, including the version of the various Pods deployed + "admissionWebhook": "A String", # Deployment state of admission-webhook "gitSync": "A String", # Deployment state of the git-sync pod "importer": "A String", # Deployment state of the importer pod "monitor": "A String", # Deployment state of the monitor pod @@ -1125,6 +1223,7 @@

Method Details

"syncToken": "A String", # Token indicating the state of the syncer. }, "version": { # Specific versioning information pertaining to ConfigSync's Pods # The version of ConfigSync deployed + "admissionWebhook": "A String", # Version of the deployed admission_webhook pod "gitSync": "A String", # Version of the deployed git-sync pod "importer": "A String", # Version of the deployed importer pod "monitor": "A String", # Version of the deployed monitor pod @@ -1151,13 +1250,26 @@

Method Details

"git": { # Git repo configuration for a single cluster. # Git repo configuration for the cluster. "gcpServiceAccountEmail": "A String", # The GCP Service Account Email used for auth when secret_type is gcpServiceAccount. "httpsProxy": "A String", # URL for the HTTPS proxy to be used when communicating with the Git repo. + "noSslVerify": True or False, # Enable or disable the SSL certificate verification Default: false. "policyDir": "A String", # The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. "secretType": "A String", # Type of secret configured for access to the Git repo. "syncBranch": "A String", # The branch of the repository to sync from. Default: master. + "syncDepth": "A String", # The depth of git commits synced by the git-sync container. "syncRepo": "A String", # The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Git revision (tag or hash) to check out. Default HEAD. "syncWaitSecs": "A String", # Period in seconds between consecutive syncs. Default: 15. }, + "resourceRequirements": { # Specifies CPU and memory limits for containers, keyed by container name + "a_key": { # ResourceRequirements allows to override the CPU and memory resource requirements of a container. + "containerName": "A String", # Name of the container + "cpuLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the CPU limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + "memoryLimit": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto # Allows to override the memory limit of a container + "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". + }, + }, + }, "sourceFormat": "A String", # Specifies whether the Config Sync Repo is in “hierarchical” or “unstructured” mode. }, "hierarchyController": { # Configuration for Hierarchy Controller # Hierarchy Controller configuration for the cluster. diff --git a/docs/dyn/healthcare_v1.projects.locations.datasets.consentStores.attributeDefinitions.html b/docs/dyn/healthcare_v1.projects.locations.datasets.consentStores.attributeDefinitions.html index 4c7c1ef8dc6..01de3072147 100644 --- a/docs/dyn/healthcare_v1.projects.locations.datasets.consentStores.attributeDefinitions.html +++ b/docs/dyn/healthcare_v1.projects.locations.datasets.consentStores.attributeDefinitions.html @@ -111,7 +111,7 @@

Method Details

The object takes the form of: { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. @@ -133,7 +133,7 @@
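For example, creating an attribute definition with this body through the generated Python client might look like the following sketch; the project, location, dataset, store, attribute ID, and category value are hypothetical placeholders, not values taken from this document:

  from googleapiclient.discovery import build

  service = build("healthcare", "v1")
  parent = ("projects/my-project/locations/us-central1/"
            "datasets/my-dataset/consentStores/my-store")  # hypothetical resource
  body = {
      "allowedValues": ["yes", "no"],  # at most 500 values; the list can only grow
      "category": "REQUEST",           # assumed category value, for illustration
  }
  attribute = (
      service.projects().locations().datasets().consentStores()
      .attributeDefinitions()
      .create(parent=parent, attributeDefinitionId="data_use_consent", body=body)
      .execute()
  )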

Method Details

An object of the form: { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. @@ -179,7 +179,7 @@

Method Details

An object of the form: { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. @@ -212,7 +212,7 @@

Method Details

{ "attributeDefinitions": [ # The returned Attribute definitions. The maximum number of attributes returned is determined by the value of page_size in the ListAttributeDefinitionsRequest. { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. @@ -252,7 +252,7 @@

Method Details

The object takes the form of: { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. @@ -274,7 +274,7 @@

Method Details

An object of the form: { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. diff --git a/docs/dyn/healthcare_v1beta1.projects.locations.datasets.consentStores.attributeDefinitions.html b/docs/dyn/healthcare_v1beta1.projects.locations.datasets.consentStores.attributeDefinitions.html index 097a5c5f08f..2dab70a33a5 100644 --- a/docs/dyn/healthcare_v1beta1.projects.locations.datasets.consentStores.attributeDefinitions.html +++ b/docs/dyn/healthcare_v1beta1.projects.locations.datasets.consentStores.attributeDefinitions.html @@ -111,7 +111,7 @@

Method Details

The object takes the form of: { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. @@ -133,7 +133,7 @@

Method Details

An object of the form: { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. @@ -179,7 +179,7 @@

Method Details

An object of the form: { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. @@ -212,7 +212,7 @@

Method Details

{ "attributeDefinitions": [ # The returned Attribute definitions. The maximum number of attributes returned is determined by the value of page_size in the ListAttributeDefinitionsRequest. { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. @@ -252,7 +252,7 @@

Method Details

The object takes the form of: { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. @@ -274,7 +274,7 @@

Method Details

An object of the form: { # A client-defined consent attribute. - "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation. + "allowedValues": [ # Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation. "A String", ], "category": "A String", # Required. The category of the attribute. The value of this field cannot be changed after creation. diff --git a/docs/dyn/iam_v1.projects.serviceAccounts.keys.html b/docs/dyn/iam_v1.projects.serviceAccounts.keys.html index dc5ba6aac96..f5549d5625a 100644 --- a/docs/dyn/iam_v1.projects.serviceAccounts.keys.html +++ b/docs/dyn/iam_v1.projects.serviceAccounts.keys.html @@ -85,10 +85,10 @@

Instance Methods

Deletes a ServiceAccountKey. Deleting a service account key does not revoke short-lived credentials that have been issued based on the service account key.

disable(name, body=None, x__xgafv=None)

-

Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey. The API is currently in preview phase.

+

Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey.

enable(name, body=None, x__xgafv=None)

-

Enable a ServiceAccountKey. The API is currently in preview phase.

+

Enable a ServiceAccountKey.

get(name, publicKeyType=None, x__xgafv=None)

Gets a ServiceAccountKey.

@@ -160,7 +160,7 @@

Method Details

disable(name, body=None, x__xgafv=None) -
Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey. The API is currently in preview phase.
+  
Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey.
 
 Args:
   name: string, Required. The resource name of the service account key in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account. (required)
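A sketch of calling disable through the generated client; the key name below is a hypothetical placeholder, and the request body is the empty disable-request message:

  from googleapiclient.discovery import build

  iam = build("iam", "v1")
  key_name = ("projects/-/serviceAccounts/"
              "my-sa@my-project.iam.gserviceaccount.com/keys/0123456789abcdef")  # hypothetical
  # The disable request takes an empty body.
  iam.projects().serviceAccounts().keys().disable(name=key_name, body={}).execute()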
@@ -184,7 +184,7 @@ 

Method Details

enable(name, body=None, x__xgafv=None) -
Enable a ServiceAccountKey. The API is currently in preview phase.
+  
Enable a ServiceAccountKey.
 
 Args:
   name: string, Required. The resource name of the service account key in the following format: `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. Using `-` as a wildcard for the `PROJECT_ID` will infer the project from the account. The `ACCOUNT` value can be the `email` address or the `unique_id` of the service account. (required)
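Re-enabling is symmetric; this sketch reuses the hypothetical iam client and key_name from the disable example above:

  iam.projects().serviceAccounts().keys().enable(name=key_name, body={}).execute()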
diff --git a/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html b/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html
index e0ae829d12c..5c51907e52c 100644
--- a/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html
+++ b/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html
@@ -148,7 +148,7 @@ 

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, @@ -311,7 +311,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, @@ -462,7 +462,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, diff --git a/docs/dyn/metastore_v1alpha.projects.locations.services.html b/docs/dyn/metastore_v1alpha.projects.locations.services.html index 1f9a46ad25b..6413ef83e29 100644 --- a/docs/dyn/metastore_v1alpha.projects.locations.services.html +++ b/docs/dyn/metastore_v1alpha.projects.locations.services.html @@ -164,7 +164,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, @@ -361,7 +361,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, @@ -503,7 +503,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, @@ -604,7 +604,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, diff --git a/docs/dyn/metastore_v1beta.projects.locations.services.backups.html b/docs/dyn/metastore_v1beta.projects.locations.services.backups.html index 5ee8ebdecb0..eaa2d95f4dc 100644 --- a/docs/dyn/metastore_v1beta.projects.locations.services.backups.html +++ b/docs/dyn/metastore_v1beta.projects.locations.services.backups.html @@ -148,7 +148,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, @@ -311,7 +311,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, @@ -462,7 +462,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, diff --git a/docs/dyn/metastore_v1beta.projects.locations.services.html b/docs/dyn/metastore_v1beta.projects.locations.services.html index e9b52e2fe88..77dd517cb4b 100644 --- a/docs/dyn/metastore_v1beta.projects.locations.services.html +++ b/docs/dyn/metastore_v1beta.projects.locations.services.html @@ -159,7 +159,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, @@ -356,7 +356,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, @@ -498,7 +498,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, @@ -599,7 +599,7 @@

Method Details

"labels": { # User-defined labels for the metastore service. "a_key": "A String", }, - "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. + "maintenanceWindow": { # Maintenance window. This specifies when Dataproc Metastore may perform system maintenance operation to the service. # The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. "dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, diff --git a/docs/dyn/monitoring_v1.projects.dashboards.html b/docs/dyn/monitoring_v1.projects.dashboards.html index f86699d6ed1..1a23efd915c 100644 --- a/docs/dyn/monitoring_v1.projects.dashboards.html +++ b/docs/dyn/monitoring_v1.projects.dashboards.html @@ -218,6 +218,92 @@

Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -439,6 +525,92 @@
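The aggregation object that recurs throughout this schema is built in two steps: a per-series alignment followed by an optional cross-series reduction. As a point of reference, here is a minimal Python sketch of such an object as it would appear in a request body built with this client; the aligner, reducer, and zone label are illustrative assumptions rather than values prescribed by this page:

# Minimal sketch of an aggregation object (illustrative values; ALIGN_RATE
# and REDUCE_MEAN are standard aligner/reducer enum names).
aggregation = {
    "alignmentPeriod": "60s",             # the API's minimum is 60 seconds
    "perSeriesAligner": "ALIGN_RATE",     # step 1: align each series into buckets
    "crossSeriesReducer": "REDUCE_MEAN",  # step 2: combine the aligned series
    "groupByFields": ["resource.label.zone"],  # one output series per zone
}

Because a reducer is specified, both an aligner other than ALIGN_NONE and an alignment period are set, matching the constraints stated in the field docs above.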

Method Details
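The hunk below documents the new timeSeriesTable widget. The following is a hedged end-to-end sketch of creating a dashboard containing one such widget with this client; the metric type, project ID, and display strings are placeholder assumptions, and the timeSeriesQueryLanguage alternative (a Monitoring Query Language string) is left out:

from googleapiclient.discovery import build

# Hypothetical timeSeriesTable widget; the metric type and label names
# are placeholders, not values taken from this document.
widget = {
    "title": "CPU utilization by instance",
    "timeSeriesTable": {
        "dataSets": [
            {
                "minAlignmentPeriod": "600s",  # data assumed to arrive every 10 minutes
                "tableTemplate": "${resource.labels.instance_id}",
                "timeSeriesQuery": {
                    "unitOverride": "1",  # treat fetched values as a bare ratio
                    "timeSeriesFilter": {
                        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                        "aggregation": {
                            "alignmentPeriod": "600s",
                            "perSeriesAligner": "ALIGN_MEAN",
                        },
                    },
                },
            },
        ],
    },
}

# Assumes Application Default Credentials; "my-project" is a placeholder.
service = build("monitoring", "v1")
response = service.projects().dashboards().create(
    parent="projects/my-project",
    body={"displayName": "Instance overview", "gridLayout": {"widgets": [widget]}},
).execute()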

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction.
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -658,6 +830,92 @@
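The timeSeriesFilterRatio documented in these hunks divides each aligned numerator point by the corresponding denominator point. A hedged sketch, assuming an illustrative request-count metric with a response_code_class label; note that the secondaryAggregation carries an aligner other than ALIGN_NONE because it specifies a reducer, per the constraint stated in the field docs:

# Hypothetical ratio: 5xx responses over all responses (metric and label
# names are illustrative assumptions, not taken from this document).
ratio_query = {
    "timeSeriesFilterRatio": {
        "numerator": {
            "filter": ('metric.type="serviceruntime.googleapis.com/api/request_count" '
                       'metric.label.response_code_class="5xx"'),
            "aggregation": {"alignmentPeriod": "300s", "perSeriesAligner": "ALIGN_RATE"},
        },
        "denominator": {
            "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count"',
            "aggregation": {"alignmentPeriod": "300s", "perSeriesAligner": "ALIGN_RATE"},
        },
        "secondaryAggregation": {  # applied after the ratio is computed
            "alignmentPeriod": "300s",
            "perSeriesAligner": "ALIGN_MEAN",
            "crossSeriesReducer": "REDUCE_MEAN",
        },
    },
}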

Method Details
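For the ranking-based pickTimeSeriesFilter documented below, a small sketch; METHOD_MEAN and BOTTOM are named in the field docs, while TOP is assumed to be the opposite direction value:

# Keep only the three series with the highest mean values (sketch; TOP is
# an assumed direction value, the counterpart of the documented BOTTOM).
pick_filter = {
    "pickTimeSeriesFilter": {
        "rankingMethod": "METHOD_MEAN",  # rank each series by its mean
        "direction": "TOP",              # keep the highest-ranked series
        "numTimeSeries": 3,              # let at most three series through
    },
}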

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction.
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -882,17 +1140,16 @@
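To make the aggregation rules above concrete, here is a minimal Python sketch of an aggregation value that satisfies them: because crossSeriesReducer is set, perSeriesAligner is also set (and is not ALIGN_NONE) and alignmentPeriod is supplied. The metric label used for grouping is an assumed example, not a value taken from this diff.

import json  # only to pretty-print the sketch

# A sketch of an aggregation obeying the documented constraints.
aggregation = {
    "alignmentPeriod": "300s",            # required here; must be >= 60s
    "perSeriesAligner": "ALIGN_RATE",     # required because a reducer is set
    "crossSeriesReducer": "REDUCE_MEAN",  # combine the aligned series...
    "groupByFields": ["resource.label.zone"],  # ...per zone (assumed label)
}
print(json.dumps(aggregation, indent=2))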

Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, - "title": "A String", # Optional. The title of the widget. - "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. - "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. - "mode": "A String", # The chart mode. - }, - "dataSets": [ # Required. The data displayed in this chart. - { # Groups a time series query definition with charting options. - "legendTemplate": "A String", # A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value. + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. - "plotType": "A String", # How this data should be plotted on the chart. - "targetAxis": "A String", # Optional. The target axis to use for plotting the metric. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. 
Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. @@ -968,9 +1225,96 @@
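Since this hunk replaces the xyChart body with the new timeSeriesTable widget, a short hedged sketch of such a widget may help; the filter string and the interpolated label path are illustrative assumptions, not values from this diff.

# A minimal timeSeriesTable widget, expressed as a Python dict request body.
time_series_table_widget = {
    "title": "CPU utilization by project",
    "timeSeriesTable": {
        "dataSets": [
            {
                # Data published every 10 minutes: do not align finer than that.
                "minAlignmentPeriod": "600s",
                # Row names interpolate series labels (assumed label path).
                "tableTemplate": "${resource.labels.project_id}",
                "timeSeriesQuery": {
                    "timeSeriesFilter": {
                        # Assumed monitoring filter; syntax documented at
                        # https://cloud.google.com/monitoring/api/v3/filters
                        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                    },
                },
            },
        ],
    },
}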

Method Details

}, }, ], - "thresholds": [ # Threshold lines drawn horizontally across the chart. - { # Defines a threshold for categorizing time series values. - "color": "A String", # The state color for this threshold. Color is not allowed in a XyChart. + }, + "title": "A String", # Optional. The title of the widget. + "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. + "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. + "mode": "A String", # The chart mode. + }, + "dataSets": [ # Required. The data displayed in this chart. + { # Groups a time series query definition with charting options. + "legendTemplate": "A String", # A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "plotType": "A String", # How this data should be plotted on the chart. + "targetAxis": "A String", # Optional. The target axis to use for plotting the metric. + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation).
# By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction.
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + "thresholds": [ # Threshold lines drawn horizontally across the chart. + { # Defines a threshold for categorizing time series values. + "color": "A String", # The state color for this threshold. Color is not allowed in a XyChart. "direction": "A String", # The direction for the current threshold. Direction is not allowed in a XyChart. "label": "A String", # A label for the threshold. "targetAxis": "A String", # The target axis to use for plotting the threshold. Target axis is not allowed in a Scorecard. @@ -1115,6 +1459,92 @@
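The pickTimeSeriesFilter comment above walks through METHOD_MEAN with direction BOTTOM; below is a sketch of the complementary case, an xyChart that keeps only the 3 series with the highest means, plus a threshold line. The filter string is an assumption, and the numeric threshold "value" field is assumed from the Threshold message (this excerpt cuts off before it).

# An xyChart that ranks each series by its mean and plots only the top 3.
top3_cpu_chart = {
    "title": "Top 3 instances by mean CPU",
    "xyChart": {
        "dataSets": [
            {
                "plotType": "LINE",
                "timeSeriesQuery": {
                    "timeSeriesFilter": {
                        # Assumed filter for illustration.
                        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                        "aggregation": {
                            "alignmentPeriod": "60s",
                            "perSeriesAligner": "ALIGN_MEAN",
                        },
                        # Mirror image of the documented example: METHOD_MEAN
                        # with direction TOP keeps the 3 highest-mean series.
                        "pickTimeSeriesFilter": {
                            "rankingMethod": "METHOD_MEAN",
                            "direction": "TOP",
                            "numTimeSeries": 3,
                        },
                    },
                },
            },
        ],
        "thresholds": [
            # "value" is assumed from the Threshold message; 0.8 = 80% CPU.
            {"label": "80% CPU", "value": 0.8},
        ],
    },
}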

Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction.
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -1336,6 +1766,92 @@
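The aggregation rules above (cross_series_reducer requires per_series_aligner other than ALIGN_NONE, plus an alignment_period of at least 60 seconds) are easiest to see in a concrete request body. A minimal Python sketch, assuming an illustrative Compute Engine CPU metric; the metric type, label, and periods are placeholders, not part of the generated schema:

    # Hypothetical aggregation for a timeSeriesFilter. Because crossSeriesReducer
    # is set, perSeriesAligner (not ALIGN_NONE) and alignmentPeriod are required.
    aggregation = {
        "alignmentPeriod": "300s",        # must be at least 60s
        "perSeriesAligner": "ALIGN_MEAN",
        "crossSeriesReducer": "REDUCE_MEAN",
        "groupByFields": ["resource.labels.zone"],  # resource.type is implicit
    }

    time_series_filter = {
        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
        "aggregation": aggregation,
    }

Omitting perSeriesAligner (or setting ALIGN_NONE) while keeping crossSeriesReducer would be rejected by the API, per the field comments above.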

Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -1555,6 +2071,92 @@
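The timeSeriesTable widget documented above is supplied as part of a dashboard body, for example to the create method of the projects.dashboards resource. A hedged Python sketch, assuming default application credentials; the project ID, metric type, grid layout, and tableTemplate are placeholders chosen for illustration, not values mandated by the schema:

    from googleapiclient.discovery import build

    # Build the Cloud Monitoring Dashboards client (monitoring v1 surface).
    monitoring = build("monitoring", "v1")

    dashboard_body = {
        "displayName": "Example time series table",
        "gridLayout": {
            "widgets": [
                {
                    "title": "Mean CPU by zone",
                    "timeSeriesTable": {
                        "dataSets": [
                            {
                                # Data is assumed to arrive every 10 minutes.
                                "minAlignmentPeriod": "600s",
                                # Name each row after its zone label.
                                "tableTemplate": "${resource.labels.zone}",
                                "timeSeriesQuery": {
                                    "timeSeriesFilter": {
                                        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                                        "aggregation": {
                                            "alignmentPeriod": "600s",
                                            "perSeriesAligner": "ALIGN_MEAN",
                                            "crossSeriesReducer": "REDUCE_MEAN",
                                            "groupByFields": ["resource.labels.zone"],
                                        },
                                    },
                                },
                            }
                        ]
                    },
                }
            ]
        },
    }

    response = (
        monitoring.projects()
        .dashboards()
        .create(parent="projects/my-project-id", body=dashboard_body)
        .execute()
    )

gridLayout is only one of the available layout options; the widget dict itself is what the schema above describes.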

Method Details
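For the ratio case, the numerator and denominator are each a filter plus an optional aggregation, and the API divides the aligned points pair-wise. A sketch of a timeSeriesFilterRatio body with placeholder metric types; the 5xx response-code label filter and unitOverride value are assumptions for illustration only:

    # Hypothetical error-rate ratio: 5xx request rate divided by total request rate.
    # Both sides use the same alignment period so their points line up pair-wise.
    ratio_query = {
        "timeSeriesFilterRatio": {
            "numerator": {
                "filter": (
                    'metric.type="serviceruntime.googleapis.com/api/request_count" '
                    'metric.labels.response_code_class="5xx"'
                ),
                "aggregation": {
                    "alignmentPeriod": "60s",
                    "perSeriesAligner": "ALIGN_RATE",
                    "crossSeriesReducer": "REDUCE_SUM",
                },
            },
            "denominator": {
                "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count"',
                "aggregation": {
                    "alignmentPeriod": "60s",
                    "perSeriesAligner": "ALIGN_RATE",
                    "crossSeriesReducer": "REDUCE_SUM",
                },
            },
        },
        "unitOverride": "1",  # treat the resulting ratio as dimensionless
    }
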

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -1779,6 +2381,92 @@
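
A minimal sketch of the new widget in use, assuming Application Default Credentials; the project ID, display name, filter string, and alignment values below are illustrative assumptions, not values required by the schema. It creates a dashboard whose only widget is a timeSeriesTable backed by a single timeSeriesFilter data set:

    # Sketch only: a dashboard with one timeSeriesTable widget.
    # Assumes ADC is configured and "my-project" is a hypothetical project ID.
    from googleapiclient.discovery import build

    monitoring = build("monitoring", "v1")

    dashboard_body = {
        "displayName": "CPU usage table",  # hypothetical name
        "gridLayout": {
            "columns": "1",
            "widgets": [
                {
                    "title": "Mean CPU per VM",
                    "timeSeriesTable": {
                        "dataSets": [
                            {
                                # Data is published every 60s, so do not align more finely.
                                "minAlignmentPeriod": "60s",
                                "timeSeriesQuery": {
                                    "timeSeriesFilter": {
                                        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                                        "aggregation": {
                                            "alignmentPeriod": "60s",
                                            "perSeriesAligner": "ALIGN_MEAN",
                                        },
                                    },
                                },
                            }
                        ],
                    },
                }
            ],
        },
    }

    created = (
        monitoring.projects()
        .dashboards()
        .create(parent="projects/my-project", body=dashboard_body)
        .execute()
    )
    print(created["name"])

Note that minAlignmentPeriod mirrors the metric's publish cadence, per the field description above, and that shownColumns and tableDisplayOptions are omitted since the former is documented as not currently used.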

@@ -1779,6 +2381,92 @@
Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -2028,13 +2716,99 @@
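
The timeSeriesFilterRatio and aggregation fields carry the same constraints in every widget that embeds them. A sketch of the ratio form of timeSeriesQuery is below; the metric type, label, and group-by field are illustrative assumptions (an error-rate ratio over a request-count metric), not values prescribed by the schema. It observes the rule stated in the descriptions above: whenever crossSeriesReducer is set, perSeriesAligner must be a value other than ALIGN_NONE and alignmentPeriod must be present, or the API returns an error.

    # Sketch only: a ratio query (5xx requests / all requests), grouped by zone.
    # Both sides must use compatible aggregation so the division is pair-wise.
    AGG = {
        "alignmentPeriod": "300s",
        "perSeriesAligner": "ALIGN_RATE",      # required (non-ALIGN_NONE) because
        "crossSeriesReducer": "REDUCE_SUM",    # crossSeriesReducer is specified
        "groupByFields": ["resource.labels.zone"],  # resource.type is implicit
    }

    ratio_query = {
        "timeSeriesFilterRatio": {
            "numerator": {
                # Hypothetical filter: only server-error responses.
                "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.labels.response_code_class="5xx"',
                "aggregation": AGG,
            },
            "denominator": {
                # Hypothetical filter: all responses for the same metric.
                "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count"',
                "aggregation": AGG,
            },
            # Optional: smooth the computed ratio after the division.
            "secondaryAggregation": {
                "alignmentPeriod": "300s",
                "perSeriesAligner": "ALIGN_MEAN",
            },
        },
        "unitOverride": "1",  # the ratio is dimensionless
    }

This dict can be dropped into the timeSeriesQuery slot of any dataSets entry shown above.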

@@ -2028,13 +2716,99 @@
Method Details

"rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. }, }, - "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. - "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. - }, - }, - "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. - "content": "A String", # The text content to be displayed. - "format": "A String", # How the text content is formatted. + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. + "content": "A String", # The text content to be displayed. + "format": "A String", # How the text content is formatted. + }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. 
First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. 
If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". 
This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. 
Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. 
+ "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. 
+ "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. @@ -2257,6 +3031,92 @@

Method Details
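The hunk below also documents timeSeriesFilterRatio, where the output series is the pair-wise division of aligned numerator and denominator points, optionally trimmed by a pickTimeSeriesFilter. A hedged sketch of such a query body follows; the metric type and label here are illustrative placeholders rather than values taken from the reference:

# Sketch of a timeSeriesFilterRatio query: the API divides each aligned
# numerator point by the matching denominator point. Both sides are aligned
# first, since ratios require aligned data.
ratio_query = {
    "timeSeriesFilterRatio": {
        "numerator": {
            "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.label.response_code_class="5xx"',  # illustrative
            "aggregation": {
                "alignmentPeriod": "300s",
                "perSeriesAligner": "ALIGN_RATE",
            },
        },
        "denominator": {
            "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count"',  # illustrative
            "aggregation": {
                "alignmentPeriod": "300s",
                "perSeriesAligner": "ALIGN_RATE",
            },
        },
        # Keep only the 3 series with the highest mean ratio, per the
        # pickTimeSeriesFilter semantics described in this hunk.
        "pickTimeSeriesFilter": {
            "rankingMethod": "METHOD_MEAN",
            "direction": "TOP",
            "numTimeSeries": 3,
        },
    },
    "unitOverride": "1",  # a ratio is dimensionless
}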

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -2476,6 +3336,92 @@

Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction.
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -2692,13 +3638,99 @@
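The timeSeriesTable widget introduced in this hunk pairs a query with table rendering options. A hedged sketch of one dataSets entry, assuming a metric that is written every 10 minutes; the metric type, the instance_id template, and the top-5 ranking are illustrative choices, not values from this diff:

    # One row per VM instance, showing the 5 instances with the highest mean CPU.
    table_widget = {
        "title": "Top 5 instances by CPU",
        "timeSeriesTable": {
            "dataSets": [{
                # Data arrives every 10 minutes, so don't align on a finer grid.
                "minAlignmentPeriod": "600s",
                # Name each row from the series' instance_id label.
                "tableTemplate": "${resource.labels.instance_id}",
                "timeSeriesQuery": {
                    "timeSeriesFilter": {
                        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                        "aggregation": {
                            "alignmentPeriod": "600s",
                            "perSeriesAligner": "ALIGN_MEAN",
                        },
                        # Keep only the 5 series that rank highest by mean value.
                        "pickTimeSeriesFilter": {
                            "rankingMethod": "METHOD_MEAN",
                            "direction": "TOP",
                            "numTimeSeries": 5,
                        },
                    },
                },
            }],
        },
    }

The sketch omits tableDisplayOptions because, per the field comment above, shownColumns is reserved for future use and currently ignored.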

Method Details

"rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. }, }, - "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. - "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. - }, - }, - "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. - "content": "A String", # The text content to be displayed. - "format": "A String", # How the text content is formatted. + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. + "content": "A String", # The text content to be displayed. + "format": "A String", # How the text content is formatted. + }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. 
First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. 
If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster".
This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. 
Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. 
+ "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. 
+ "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. @@ -2943,6 +3975,92 @@

Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the primary aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction.
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -3164,6 +4282,92 @@
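To make the timeSeriesTable schema above concrete, here is a hedged end-to-end sketch of a dashboards.create call that builds one table widget ranking instances by CPU. It assumes application-default credentials; the project ID, display names, metric type, and alignment values are placeholders, not part of the generated reference.

    from googleapiclient.discovery import build

    # The dashboards resource lives in the Cloud Monitoring v1 surface.
    monitoring = build("monitoring", "v1")

    dashboard_body = {
        "displayName": "Top instances by CPU (table)",
        "gridLayout": {
            "widgets": [{
                "title": "Top 5 instances by mean CPU utilization",
                "timeSeriesTable": {
                    "dataSets": [{
                        # Data is assumed to arrive every 10 minutes, so do
                        # not align on anything finer than a 10-minute period.
                        "minAlignmentPeriod": "600s",
                        "tableTemplate": "${resource.labels.instance_id}",
                        "timeSeriesQuery": {
                            "timeSeriesFilter": {
                                "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                                "aggregation": {
                                    "alignmentPeriod": "600s",
                                    "perSeriesAligner": "ALIGN_MEAN",
                                },
                                # Keep only the 5 series with the highest means.
                                "pickTimeSeriesFilter": {
                                    "rankingMethod": "METHOD_MEAN",
                                    "direction": "TOP",
                                    "numTimeSeries": 5,
                                },
                            },
                        },
                    }],
                },
            }],
        },
    }

    request = monitoring.projects().dashboards().create(
        parent="projects/my-project",  # placeholder project
        body=dashboard_body,
    )
    # response = request.execute()

Note that shownColumns is omitted: as documented above, that field is reserved for future use and is currently ignored.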

Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number. Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transform this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the primary aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored. The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series. Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction.
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -3375,13 +4579,99 @@

Method Details

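The aggregation fields documented in the hunks above and below (alignment_period, per_series_aligner, cross_series_reducer, group_by_fields) and the pickTimeSeriesFilter example compose into a concrete timeSeriesQuery value. The following is a minimal sketch, not taken from this reference: the metric type, the zone label path, and the chosen enum values are illustrative assumptions.

    # Hedged sketch of a timeSeriesQuery body built from the fields above.
    # Metric type, label path, and enum choices are assumptions, not values
    # taken from this generated reference.
    time_series_query = {
        "timeSeriesFilter": {
            "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
            # Step 1 (alignment): one ALIGN_MEAN point per 60s alignment_period.
            # Step 2 (reduction): REDUCE_MEAN across series, one output per zone.
            "aggregation": {
                "alignmentPeriod": "60s",
                "perSeriesAligner": "ALIGN_MEAN",
                "crossSeriesReducer": "REDUCE_MEAN",
                "groupByFields": ["resource.labels.zone"],  # assumed label path
            },
            # Mirrors the documented example: keep only the 3 series with the
            # lowest mean values (METHOD_MEAN, BOTTOM, 3).
            "pickTimeSeriesFilter": {
                "rankingMethod": "METHOD_MEAN",
                "direction": "BOTTOM",
                "numTimeSeries": 3,
            },
        },
        "unitOverride": "1",  # CPU utilization is a dimensionless ratio
    }

Note that the sketch satisfies the documented constraint: because crossSeriesReducer is set, perSeriesAligner is present and not ALIGN_NONE, and alignmentPeriod is specified.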
"rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. }, }, - "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. - "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. - }, - }, - "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. - "content": "A String", # The text content to be displayed. - "format": "A String", # How the text content is formatted. + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. + "content": "A String", # The text content to be displayed. + "format": "A String", # How the text content is formatted. + }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. 
First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. 
If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". 
This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. 
Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. 
+ "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. 
+ "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. @@ -3607,6 +4897,92 @@

Method Details

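The timeSeriesTable and timeSeriesFilterRatio fields repeated in the hunk below compose into a complete widget. A hedged sketch follows, assuming a Cloud Run-style request_count metric with a response_code_class label and a service_name resource label; the project ID, metric type, and label names are placeholders rather than values from this reference.

    # Hedged sketch: a timeSeriesTable widget that tabulates the ratio of 5xx
    # responses to all responses, one row per service via tableTemplate.
    from googleapiclient.discovery import build

    # Shared aggregation: rate-align each counter, then sum per service.
    agg = {
        "alignmentPeriod": "300s",
        "perSeriesAligner": "ALIGN_RATE",
        "crossSeriesReducer": "REDUCE_SUM",
        "groupByFields": ["resource.labels.service_name"],  # assumed label path
    }

    widget = {
        "title": "5xx request ratio by service",
        "timeSeriesTable": {
            "dataSets": [{
                "minAlignmentPeriod": "300s",  # data assumed published every 5 min
                "tableTemplate": "${resource.labels.service_name}",
                "timeSeriesQuery": {
                    "timeSeriesFilterRatio": {
                        # Numerator and denominator are divided pair-wise after
                        # both have been aligned on the same period boundaries.
                        "numerator": {
                            "filter": 'metric.type="run.googleapis.com/request_count" metric.labels.response_code_class="5xx"',
                            "aggregation": agg,
                        },
                        "denominator": {
                            "filter": 'metric.type="run.googleapis.com/request_count"',
                            "aggregation": agg,
                        },
                    },
                },
            }],
        },
    }

    # Attach the widget to a dashboard via the v1 Dashboards API.
    service = build("monitoring", "v1")  # application default credentials assumed
    service.projects().dashboards().create(
        parent="projects/my-project",  # placeholder project ID
        body={
            "displayName": "Service health",
            "gridLayout": {"columns": "2", "widgets": [widget]},
        },
    ).execute()

Reusing the same aggregation for the numerator and denominator keeps both series on identical alignment boundaries, which is what makes the pair-wise division described below well defined.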
"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ "A String",
+ ],
+ "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio.
+ "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.
+ "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ "A String",
+ ],
+ "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.
+ },
+ "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter.
+ "direction": "A String", # How to use the ranking to select time series that pass through the filter.
+ "numTimeSeries": 42, # How many time series to allow to pass through the filter.
+ "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series.
+ },
+ "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed.
+ "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.
+ "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.
+ "A String",
+ ],
+ "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.
+ },
+ "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API.
+ "numTimeSeries": 42, # How many time series to output.
+ "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series.
+ },
+ },
+ "timeSeriesQueryLanguage": "A String", # A query used to fetch time series.
+ "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor.
+ },
+ },
+ ],
+ },
"title": "A String", # Optional. The title of the widget.
"xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data.
"chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -3858,6 +5234,92 @@
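To make the numerator/denominator fields above concrete, here is a rough sketch (illustrative only — the metric type and label in the filter strings are hypothetical placeholders, not values defined by this API) of a TimeSeriesFilterRatio assembled as a plain Python dict, computing an error rate as errors divided by total requests:

    import json

    # Sketch of a TimeSeriesFilterRatio body. Field names follow the
    # reference above; the filter strings are hypothetical examples.
    ratio_query = {
        "timeSeriesFilterRatio": {
            "numerator": {
                "filter": 'metric.type="example.com/request_count" metric.labels.status="error"',
                "aggregation": {
                    "alignmentPeriod": "60s",  # must be at least 60 seconds
                    "perSeriesAligner": "ALIGN_RATE",
                    "crossSeriesReducer": "REDUCE_SUM",
                    "groupByFields": ["resource.labels.zone"],
                },
            },
            "denominator": {
                "filter": 'metric.type="example.com/request_count"',
                # Same alignment and grouping as the numerator, so the
                # pair-wise division lines up element by element.
                "aggregation": {
                    "alignmentPeriod": "60s",
                    "perSeriesAligner": "ALIGN_RATE",
                    "crossSeriesReducer": "REDUCE_SUM",
                    "groupByFields": ["resource.labels.zone"],
                },
            },
            # Optional: smooth the computed ratio with a second aggregation.
            "secondaryAggregation": {
                "alignmentPeriod": "300s",
                "perSeriesAligner": "ALIGN_MEAN",
            },
        },
        "unitOverride": "1",  # a ratio is dimensionless
    }

    print(json.dumps(ratio_query, indent=2))

Note that both sides specify a per_series_aligner other than ALIGN_NONE together with an alignment_period, as the constraints above require whenever cross_series_reducer is set.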

Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -4071,13 +5533,99 @@
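For the timeSeriesTable widget introduced above, a minimal dataSets entry could look like the following sketch (the metric type is an illustrative placeholder, and the enum spellings METHOD_MEAN and TOP are assumptions inferred from the ranking fields described above — verify against the API before relying on them):

    # Sketch of a timeSeriesTable widget with one data set that keeps only
    # the five series with the highest mean, named by their project label.
    table_widget = {
        "title": "Top CPU consumers",
        "timeSeriesTable": {
            "dataSets": [
                {
                    # Data assumed to be published every 10 minutes, so do not
                    # fetch and align at intervals finer than 600s.
                    "minAlignmentPeriod": "600s",
                    "tableTemplate": "${resource.labels.project_id}",
                    "timeSeriesQuery": {
                        "timeSeriesFilter": {
                            "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                            "aggregation": {
                                "alignmentPeriod": "600s",
                                "perSeriesAligner": "ALIGN_MEAN",
                            },
                            # Rank each series by its mean and keep the top 5.
                            "pickTimeSeriesFilter": {
                                "rankingMethod": "METHOD_MEAN",
                                "direction": "TOP",
                                "numTimeSeries": 5,
                            },
                        },
                    },
                },
            ],
        },
    }

The minAlignmentPeriod value mirrors the 10-minute publishing example in that field's description, and tableTemplate uses the ${label_name} interpolation form documented above.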

Method Details

"rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. }, }, - "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. - "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. - }, - }, - "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. - "content": "A String", # The text content to be displayed. - "format": "A String", # How the text content is formatted. + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. + "content": "A String", # The text content to be displayed. + "format": "A String", # How the text content is formatted. + }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. 
First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. 
If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". 
This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. 
Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. 
+ "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. 
+ "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. @@ -4298,6 +5846,92 @@

Method Details
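The timeSeriesFilterRatio structure documented in this section pairs two filter-plus-aggregation queries and divides each aligned numerator point by the corresponding denominator point. A hedged sketch of such a pair; the metric type and label filter are illustrative assumptions:

    # Illustrative ratio: 5xx responses divided by all responses, with both
    # sides aligned into per-second rates over 60-second windows.
    ratio = {
        "numerator": {
            "filter": 'metric.type="loadbalancing.googleapis.com/https/request_count" '
                      'metric.labels.response_code_class="500"',
            "aggregation": {"alignmentPeriod": "60s", "perSeriesAligner": "ALIGN_RATE"},
        },
        "denominator": {
            "filter": 'metric.type="loadbalancing.googleapis.com/https/request_count"',
            "aggregation": {"alignmentPeriod": "60s", "perSeriesAligner": "ALIGN_RATE"},
        },
    }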

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction.
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -4522,6 +6156,92 @@
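The timeSeriesTable widget above slots into a dashboard body like any other widget. Below is a minimal sketch, assuming the Cloud Monitoring Dashboards API client (build("monitoring", "v1")) with application default credentials; the project ID, metric filter, and titles are placeholders:

    from googleapiclient.discovery import build

    # Build the Dashboards API client (uses application default credentials).
    service = build("monitoring", "v1")

    dashboard = {
        "displayName": "Example dashboard",
        "gridLayout": {
            "widgets": [
                {
                    "title": "CPU usage by instance",
                    "timeSeriesTable": {
                        "dataSets": [
                            {
                                # Name each table row after the instance it describes.
                                "tableTemplate": "${resource.labels.instance_id}",
                                "timeSeriesQuery": {
                                    "timeSeriesFilter": {
                                        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                                        "aggregation": {
                                            "alignmentPeriod": "300s",
                                            "perSeriesAligner": "ALIGN_MEAN",
                                        },
                                    },
                                },
                            },
                        ],
                    },
                },
            ],
        },
    }

    created = service.projects().dashboards().create(
        parent="projects/my-project", body=dashboard
    ).execute()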

Method Details
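As a quick reference for the pickTimeSeriesFilter structure that the schema below repeats, a sketch that keeps only the three series whose aligned mean values rank highest; the field values are illustrative:

    # Illustrative ranking filter: keep the 3 series with the highest means.
    pick_filter = {
        "rankingMethod": "METHOD_MEAN",
        "direction": "TOP",
        "numTimeSeries": 3,
    }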

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction.
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart.
@@ -4747,13 +6467,99 @@

Method Details
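A note for readers of the hunks above: the nested "aggregation" object that recurs throughout the new widget schema is an ordinary dict when used from this Python client. The following is a minimal sketch; the field names come from this page, while the metric filter string and the particular aligner/reducer choices are illustrative assumptions, not values the API requires.

    # Hypothetical example: mean CPU utilization per zone on 5-minute windows.
    # Only the field names are taken from the schema documented above; the
    # metric type and grouping label are placeholders.
    aggregation = {
        "alignmentPeriod": "300s",            # duration string; must be at least 60s
        "perSeriesAligner": "ALIGN_MEAN",     # must not be ALIGN_NONE when a reducer is set
        "crossSeriesReducer": "REDUCE_MEAN",  # combines the aligned series into one
        "groupByFields": ["resource.labels.zone"],  # one output series per zone
    }

    time_series_filter = {
        "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
        "aggregation": aggregation,
    }

Per the crossSeriesReducer notes above, omitting perSeriesAligner (or setting it to ALIGN_NONE) while a reducer is present would cause the request to fail.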

"rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. }, }, - "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. - "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. - }, - }, - "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. - "content": "A String", # The text content to be displayed. - "format": "A String", # How the text content is formatted. + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + "text": { # A widget that displays textual content. # A raw string or markdown displaying textual content. + "content": "A String", # The text content to be displayed. + "format": "A String", # How the text content is formatted. + }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. 
First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. 
If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". 
This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. 
Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. 
+ "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. 
+ "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. @@ -4976,6 +6782,92 @@

Method Details
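The timeSeriesFilterRatio and pickTimeSeriesFilter fields documented in the hunk above compose naturally: the ratio is computed pair-wise from the aligned numerator and denominator series, and the picker then keeps only the highest- or lowest-ranked results. A hedged sketch follows; the load-balancer metric and the response_code_class label are assumptions for illustration, not anything this page mandates.

    # Hypothetical example: 5xx responses as a fraction of all responses,
    # keeping only the 3 series with the highest mean ratio.
    ratio_query = {
        "timeSeriesFilterRatio": {
            "numerator": {
                "filter": 'metric.type="loadbalancing.googleapis.com/https/request_count" '
                          'metric.labels.response_code_class="500"',
                "aggregation": {"alignmentPeriod": "60s", "perSeriesAligner": "ALIGN_RATE"},
            },
            "denominator": {
                "filter": 'metric.type="loadbalancing.googleapis.com/https/request_count"',
                "aggregation": {"alignmentPeriod": "60s", "perSeriesAligner": "ALIGN_RATE"},
            },
            "pickTimeSeriesFilter": {
                "rankingMethod": "METHOD_MEAN",
                "direction": "TOP",
                "numTimeSeries": 3,
            },
        },
        "unitOverride": "1",  # the ratio is dimensionless
    }

Since the statisticalTimeSeriesFilter sibling is documented above as deprecated and completely ignored by the API, new configurations should prefer pickTimeSeriesFilter as shown here.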

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking. For example, if ranking_method is METHOD_MEAN, direction is BOTTOM, and num_time_series is 3, then the 3 time series with the lowest mean values will pass through the filter. # Ranking-based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -5195,6 +7087,92 @@
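The aggregation schema above carries several cross-field constraints: whenever cross_series_reducer is set, per_series_aligner must be set to something other than ALIGN_NONE, and an alignment_period of at least 60 seconds must be supplied. What follows is a minimal sketch of one valid timeSeriesQuery value, written as the plain Python dict this client library accepts; the metric type, filter string, periods, and enum choices are illustrative assumptions, not values taken from this reference.

    # Sketch only: a timeSeriesQuery dict obeying the aggregation rules above.
    # The filter string and all parameter values are assumed examples.
    time_series_query = {
        "timeSeriesFilter": {
            # Required: a monitoring filter naming metric types, resources, and projects.
            "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization" resource.type="gce_instance"',
            "aggregation": {
                # cross_series_reducer is set, so per_series_aligner must not be
                # ALIGN_NONE and alignment_period (>= 60s) is mandatory.
                "alignmentPeriod": "300s",
                "perSeriesAligner": "ALIGN_MEAN",
                "crossSeriesReducer": "REDUCE_MEAN",
                # resource.type is preserved implicitly; other labels are
                # aggregated away unless listed here.
                "groupByFields": ["resource.labels.zone"],
            },
            "pickTimeSeriesFilter": {
                # Keep only the 3 series with the highest mean values.
                "rankingMethod": "METHOD_MEAN",
                "direction": "TOP",
                "numTimeSeries": 3,
            },
        },
    }

A dict of this shape slots into the timeSeriesQuery field of a chart or table data set such as the ones shown in the hunks above and below.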

@@ -5195,6 +7087,92 @@

Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. @@ -5419,6 +7397,92 @@
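The ratio variant applies the same aggregation rules independently to each half: numerator and denominator each carry their own required filter and optional aggregation, and secondaryAggregation (omitted here) would operate on the pair-wise quotient. Below is a sketch of a single dataSets entry for the timeSeriesTable widget above, again using placeholder metric types and labels rather than values from this reference.

    # Sketch only: one timeSeriesTable dataSets entry computing an error ratio.
    # The metric type and the response_code_class label are assumed examples.
    error_ratio_data_set = {
        # Source data is assumed to arrive every 10 minutes; don't align finer.
        "minAlignmentPeriod": "600s",
        # Label each table row with its project ID via template interpolation.
        "tableTemplate": "${resource.labels.project_id}",
        "timeSeriesQuery": {
            "timeSeriesFilterRatio": {
                "numerator": {
                    "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count" metric.labels.response_code_class="5xx"',
                    "aggregation": {
                        "alignmentPeriod": "600s",
                        "perSeriesAligner": "ALIGN_RATE",
                        "crossSeriesReducer": "REDUCE_SUM",
                    },
                },
                "denominator": {
                    "filter": 'metric.type="serviceruntime.googleapis.com/api/request_count"',
                    "aggregation": {
                        "alignmentPeriod": "600s",
                        "perSeriesAligner": "ALIGN_RATE",
                        "crossSeriesReducer": "REDUCE_SUM",
                    },
                },
            },
        },
    }

Aligning both halves with the same alignment_period keeps the pair-wise division well defined; statisticalTimeSeriesFilter is deliberately left out, since the schema notes it is deprecated and ignored by the API.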

@@ -5419,6 +7397,92 @@

Method Details

"content": "A String", # The text content to be displayed. "format": "A String", # How the text content is formatted. }, + "timeSeriesTable": { # A table that displays time series data. # A widget that displays time series data in a tabular format. + "dataSets": [ # Required. The data displayed in this table. + { # Groups a time series query definition with table options. + "minAlignmentPeriod": "A String", # Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals. + "tableDisplayOptions": { # Table display options that can be reused. # Optional. Table display options for configuring how the table is rendered. + "shownColumns": [ # Optional. Columns to display in the table. Leave empty to display all available columns. Note: This field is for future features and is not currently used. + "A String", + ], + }, + "tableTemplate": "A String", # Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. "${resource.labels.project_id}." + "timeSeriesQuery": { # TimeSeriesQuery collects the set of supported methods for querying time series data from the Stackdriver metrics API. # Required. Fields for querying time series data from the Stackdriver metrics API. + "timeSeriesFilter": { # A filter that defines a subset of time series data that is displayed in a widget. Time series data is fetched using the ListTimeSeries (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) method. # Filter parameters to fetch time series. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. 
The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. + }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after aggregation is applied. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesFilterRatio": { # A pair of time series filters that define a ratio computation. The output time series is the pair-wise division of each aligned element from the numerator and denominator time series. # Parameters to fetch a ratio between two time series filters. + "denominator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The denominator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. 
+ "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "numerator": { # Describes a query to build the numerator or denominator of a TimeSeriesFilterRatio. # The numerator of the ratio. + "aggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. 
+ "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "filter": "A String", # Required. The monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query. + }, + "pickTimeSeriesFilter": { # Describes a ranking-based time series filter. Each input time series is ranked with an aligner. The filter will allow up to num_time_series time series to pass through it, selecting them based on the relative ranking.For example, if ranking_method is METHOD_MEAN,direction is BOTTOM, and num_time_series is 3, then the 3 times series with the lowest mean values will pass through the filter. # Ranking based time series filter. + "direction": "A String", # How to use the ranking to select time series that pass through the filter. + "numTimeSeries": 42, # How many time series to allow to pass through the filter. + "rankingMethod": "A String", # ranking_method is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
+ }, + "secondaryAggregation": { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). # Apply a second aggregation after the ratio is computed. + "alignmentPeriod": "A String", # The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks. + "crossSeriesReducer": "A String", # The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. + "groupByFields": [ # The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. + "A String", + ], + "perSeriesAligner": "A String", # An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. + }, + "statisticalTimeSeriesFilter": { # A filter that ranks streams based on their statistical relation to other streams in a request. Note: This field is deprecated and completely ignored by the API. # Statistics based time series filter. Note: This field is deprecated and completely ignored by the API. + "numTimeSeries": 42, # How many time series to output. + "rankingMethod": "A String", # rankingMethod is applied to a set of time series, and then the produced value for each individual time series is used to compare a given time series to others. These are methods that cannot be applied stream-by-stream, but rather require the full context of a request to evaluate time series. + }, + }, + "timeSeriesQueryLanguage": "A String", # A query used to fetch time series. + "unitOverride": "A String", # The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the unit (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in MetricDescriptor. + }, + }, + ], + }, "title": "A String", # Optional. The title of the widget. "xyChart": { # A chart that displays data on a 2D (X and Y axes) plane. # A chart of time series data. "chartOptions": { # Options to control visual rendering of a chart. # Display options for the chart. diff --git a/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html b/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html index 58913ddcbfb..d76837aed22 100644 --- a/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html +++ b/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html @@ -95,7 +95,7 @@
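Putting these fields together: the sketch below builds a dashboard containing one timeSeriesTable widget via the google-api-python-client. It is a minimal illustration rather than canonical usage — it assumes Application Default Credentials, a placeholder project ID (my-project), and an illustrative Compute Engine CPU metric filter; every field value is an example, not a requirement.

```python
from googleapiclient.discovery import build

# Minimal sketch; assumes Application Default Credentials and a placeholder
# project ID. Metric, alignment, and grouping choices are illustrative only.
monitoring = build("monitoring", "v1")  # Cloud Monitoring dashboards API

widget = {
    "title": "Mean CPU utilization by zone",
    "timeSeriesTable": {
        "dataSets": [
            {
                # Align raw points into 10-minute means, then reduce the
                # aligned series to one output series per zone.
                "minAlignmentPeriod": "600s",
                "tableTemplate": "${resource.labels.zone}",
                "timeSeriesQuery": {
                    "timeSeriesFilter": {
                        "filter": (
                            'metric.type="compute.googleapis.com/instance/'
                            'cpu/utilization" resource.type="gce_instance"'
                        ),
                        "aggregation": {
                            "alignmentPeriod": "600s",
                            "perSeriesAligner": "ALIGN_MEAN",
                            "crossSeriesReducer": "REDUCE_MEAN",
                            "groupByFields": ["resource.labels.zone"],
                        },
                    },
                },
            }
        ]
    },
}

dashboard = monitoring.projects().dashboards().create(
    parent="projects/my-project",
    body={"displayName": "CPU table (sketch)", "gridLayout": {"widgets": [widget]}},
).execute()
print(dashboard["name"])
```

To tabulate a ratio of two filtered streams instead (for example, errors over total requests), replace timeSeriesFilter with a timeSeriesFilterRatio carrying the numerator and denominator filters documented above.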
diff --git a/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html index 58913ddcbfb..d76837aed22 100644 --- a/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html +++ b/docs/dyn/policyanalyzer_v1.projects.locations.activityTypes.activities.html @@ -95,7 +95,7 @@

Method Details

Args:
  parent: string, Required. The container resource on which to execute the request. Acceptable formats: `projects/[PROJECT_ID|PROJECT_NUMBER]/locations/[LOCATION]/activityTypes/[ACTIVITY_TYPE]` LOCATION here refers to Google Cloud Locations: https://cloud.google.com/about/locations/ (required)
-  filter: string, Optional. Filter expression to restrict the activities returned. Supported filters are: - service_account_last_authn.full_resource_name {=} [STRING] - service_account_key_last_authn.full_resource_name {=} [STRING]
+  filter: string, Optional. Filter expression to restrict the activities returned. For serviceAccountLastAuthentication activities, supported filters are: - `activities.full_resource_name {=} [STRING]` - `activities.fullResourceName {=} [STRING]` where `[STRING]` is the full resource name of the service account. For serviceAccountKeyLastAuthentication activities, supported filters are: - `activities.full_resource_name {=} [STRING]` - `activities.fullResourceName {=} [STRING]` where `[STRING]` is the full resource name of the service account key.
  pageSize: integer, Optional. The maximum number of results to return from this request. Max limit is 1000. Non-positive values are ignored. The presence of `nextPageToken` in the response indicates that more results might be available.
  pageToken: string, Optional. If present, then retrieve the next batch of results from the preceding call to this method. `pageToken` must be the value of `nextPageToken` from the previous response. The values of other method parameters should be identical to those in the previous call.
  x__xgafv: string, V1 error format.
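As a usage sketch, the snippet below queries serviceAccountLastAuthentication activities with the new filter form through the google-api-python-client. The method name (query, as the Args above suggest), the project ID, and the service-account resource name are assumptions for illustration; adjust them to your environment.

```python
from googleapiclient.discovery import build

# Minimal sketch; Application Default Credentials assumed, IDs are placeholders.
service = build("policyanalyzer", "v1")
activities = service.projects().locations().activityTypes().activities()

request = activities.query(
    parent=(
        "projects/my-project/locations/global/"
        "activityTypes/serviceAccountLastAuthentication"
    ),
    # Restrict output to a single service account by full resource name.
    filter=(
        'activities.full_resource_name="//iam.googleapis.com/projects/'
        'my-project/serviceAccounts/my-sa@my-project.iam.gserviceaccount.com"'
    ),
    pageSize=100,
)
while request is not None:
    response = request.execute()
    for activity in response.get("activities", []):
        print(activity)
    # The generated client pairs paginated methods with a *_next helper.
    request = activities.query_next(request, response)
```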
diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.branches.products.html index dcac944f8ad..f19aa2b9978 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.branches.products.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.branches.products.html @@ -206,7 +206,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -236,7 +236,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -338,7 +338,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -368,7 +368,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -494,7 +494,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -524,7 +524,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -652,7 +652,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -682,7 +682,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -833,7 +833,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -863,7 +863,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -941,7 +941,7 @@

Method Details

Updates a Product.
 
 Args:
-  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". (required)
+  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
   body: object, The request body.
     The object takes the form of:
 
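A hedged sketch of such a patch call follows, using the google-api-python-client. The project, catalog, branch, and product IDs are placeholders, and the updateMask value is just one plausible choice; note that the colors list may now carry up to 25 entries per the change above.

```python
from googleapiclient.discovery import build

# Minimal sketch of a partial product update; assumes Application Default
# Credentials. All IDs below are placeholders.
retail = build("retail", "v2")
products = retail.projects().locations().catalogs().branches().products()

name = (
    "projects/my-project/locations/global/catalogs/default_catalog/"
    "branches/default_branch/products/sku-123"
)
updated = products.patch(
    name=name,
    updateMask="colorInfo.colors",  # only this field is written
    body={"colorInfo": {"colors": ["Midnight blue", "Navy"]}},
).execute()
print(updated["name"])
```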
@@ -982,7 +982,7 @@ 

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -1012,7 +1012,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -1115,7 +1115,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -1145,7 +1145,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -1254,7 +1254,7 @@

Method Details

Updates inventory information for a Product while respecting the last update timestamps of each inventory field. This process is asynchronous and does not require the Product to exist before updating fulfillment information. If the request is valid, the update will be enqueued and processed downstream. As a consequence, when a response is returned, updates are not immediately manifested in the Product queried by GetProduct or ListProducts. When inventory is updated with CreateProduct and UpdateProduct, the specified inventory field value(s) will overwrite any existing value(s) while ignoring the last update time for this field. Furthermore, the last update time for the specified inventory fields will be set to the time of the CreateProduct or UpdateProduct request. If no inventory fields are set in CreateProductRequest.product, then any pre-existing inventory information for this product will be used. If no inventory fields are set in UpdateProductRequest.set_mask, then any existing inventory information will be preserved. Pre-existing inventory information can only be updated with SetInventory, AddFulfillmentPlaces, and RemoveFulfillmentPlaces. This feature is only available for users who have Retail Search enabled. Please submit the form [here](https://cloud.google.com/contact) to contact Cloud sales if you are interested in using Retail Search.
 
 Args:
-  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". (required)
+  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
   body: object, The request body.
     The object takes the form of:
 
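For illustration, a minimal setInventory sketch with the google-api-python-client follows; it assumes Retail Search is enabled on the project and uses placeholder IDs. setMask limits the write to the named inventory fields, and setTime drives the per-field last-update-time comparison described above.

```python
import datetime

from googleapiclient.discovery import build

# Minimal sketch; placeholder IDs, Application Default Credentials assumed.
retail = build("retail", "v2")
products = retail.projects().locations().catalogs().branches().products()

name = (
    "projects/my-project/locations/global/catalogs/default_catalog/"
    "branches/default_branch/products/sku-123"
)
operation = products.setInventory(
    name=name,
    body={
        "inventory": {
            "name": name,
            "priceInfo": {"currencyCode": "USD", "price": 19.99},
            "availability": "IN_STOCK",
        },
        "setMask": "priceInfo,availability",  # only these fields are written
        # Updates stamped earlier than a field's stored update time are dropped.
        "setTime": datetime.datetime.now(datetime.timezone.utc).isoformat(),
        "allowMissing": True,
    },
).execute()
print(operation["name"])  # name of the long-running operation
```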
@@ -1297,7 +1297,7 @@ 

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -1327,7 +1327,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html index 9e144715b31..3225181027c 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html @@ -182,7 +182,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -212,7 +212,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -475,7 +475,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -505,7 +505,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html index 27725cc8b9c..0d720adbe2c 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html @@ -231,7 +231,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -261,7 +261,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -537,7 +537,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -567,7 +567,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -724,7 +724,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -754,7 +754,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html index e0101597901..485e5e4123d 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html @@ -206,7 +206,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -236,7 +236,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -338,7 +338,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -368,7 +368,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -494,7 +494,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -524,7 +524,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -652,7 +652,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -682,7 +682,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -834,7 +834,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -864,7 +864,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -943,7 +943,7 @@

Method Details

Updates a Product.
 
 Args:
-  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". (required)
+  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
   body: object, The request body.
     The object takes the form of:
 
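The hunk above relaxes the documentation of the `patch` method's `name` argument: the note that the branch ID must be "default_branch" is removed. A minimal sketch of calling the updated method through the generated Python client; the project, branch, and product IDs below are invented placeholders, not values taken from this diff:

```python
from googleapiclient.discovery import build

# A minimal sketch, assuming application-default credentials are configured.
service = build("retail", "v2alpha")

# With the "default_branch"-only note removed, a non-default branch ("0") is
# shown here; "my-project" and "sku-123" are placeholder identifiers.
name = ("projects/my-project/locations/global/catalogs/default_catalog/"
        "branches/0/products/sku-123")

request = service.projects().locations().catalogs().branches().products().patch(
    name=name,
    updateMask="colorInfo.colors",  # restrict the update to a single field
    body={"colorInfo": {"colors": ["Ocean Blue", "Navy"]}},
)
response = request.execute()  # returns the updated Product resource
```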
@@ -984,7 +984,7 @@ <h3>Method Details</h3>
     "colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
       "A String",
     ],
-    "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+    "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
       "A String",
     ],
   },
@@ -1014,7 +1014,7 @@ <h3>Method Details</h3>
     "materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
       "A String",
     ],
-    "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+    "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
     "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
       "A String",
     ],
@@ -1117,7 +1117,7 @@ <h3>Method Details</h3>
     "colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
       "A String",
     ],
-    "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+    "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
       "A String",
     ],
   },
@@ -1147,7 +1147,7 @@ <h3>Method Details</h3>
     "materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material).
       "A String",
     ],
-    "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch".
+    "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.
     "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern).
       "A String",
     ],
@@ -1256,7 +1256,7 @@ <h3>Method Details</h3>
Updates inventory information for a Product while respecting the last update timestamps of each inventory field. This process is asynchronous and does not require the Product to exist before updating fulfillment information. If the request is valid, the update will be enqueued and processed downstream. As a consequence, when a response is returned, updates are not immediately manifested in the Product queried by GetProduct or ListProducts. When inventory is updated with CreateProduct and UpdateProduct, the specified inventory field value(s) will overwrite any existing value(s) while ignoring the last update time for this field. Furthermore, the last update time for the specified inventory fields will be overwritten to the time of the CreateProduct or UpdateProduct request. If no inventory fields are set in CreateProductRequest.product, then any pre-existing inventory information for this product will be used. If no inventory fields are set in UpdateProductRequest.set_mask, then any existing inventory information will be preserved. Pre-existing inventory information can only be updated with SetInventory, AddFulfillmentPlaces, and RemoveFulfillmentPlaces. This feature is only available for users who have Retail Search enabled. Please submit a form [here](https://cloud.google.com/contact) to contact cloud sales if you are interested in using Retail Search.
 
 Args:
-  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". (required)
+  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
   body: object, The request body.
     The object takes the form of:
 
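Since `setInventory` is asynchronous and, as the description above notes, does not require the Product to exist beforehand, a call will typically supply a `setMask` and `allowMissing`. A hedged sketch under the same placeholder `service` and `name` as the earlier patch example:

```python
# Sketch only; reuses the `service` and `name` placeholders from the patch example.
request = service.projects().locations().catalogs().branches().products().setInventory(
    name=name,
    body={
        "inventory": {
            "name": name,
            "priceInfo": {"price": 19.99, "currencyCode": "USD"},  # illustrative values
            "availability": "IN_STOCK",
        },
        "setMask": "priceInfo,availability",  # only these inventory fields are overwritten
        "allowMissing": True,  # the Product need not exist yet, per the description above
    },
)
operation = request.execute()  # a long-running Operation; poll it before reading back
```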
@@ -1299,7 +1299,7 @@ <h3>Method Details</h3>
     "colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
       "A String",
     ],
-    "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
+    "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).
       "A String",
     ],
   },
@@ -1329,7 +1329,7 @@ <h3>Method Details</h3>
"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html index e28eb9cac99..46db30ae4a5 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html @@ -182,7 +182,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -212,7 +212,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -476,7 +476,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -506,7 +506,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html index 4bf37d20466..6307920f2eb 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html @@ -231,7 +231,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -261,7 +261,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -537,7 +537,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -567,7 +567,7 @@

Method Details

"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -724,7 +724,7 @@

Method Details

"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -754,7 +754,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html index e961357939d..21286a24ca8 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html @@ -206,7 +206,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -236,7 +236,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -338,7 +338,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -368,7 +368,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -494,7 +494,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -524,7 +524,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -652,7 +652,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -682,7 +682,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -833,7 +833,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -863,7 +863,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -941,7 +941,7 @@


Updates a Product.
 
 Args:
-  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". (required)
+  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
   body: object, The request body.
     The object takes the form of:
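The request body is truncated in this hunk. As a usage sketch, a partial update through the generated client might look like the following, assuming the method takes the usual `updateMask` query parameter; the IDs and fields are illustrative:

```python
from googleapiclient.discovery import build

retail = build("retail", "v2beta")
products = retail.projects().locations().catalogs().branches().products()

name = ("projects/my-project/locations/global/catalogs/default_catalog"
        "/branches/0/products/sku-123")
# Only fields named in updateMask are changed; everything else is preserved.
updated = products.patch(
    name=name,
    updateMask="colorInfo.colors",
    body={"colorInfo": {"colors": ["Navy"]}},  # up to 25 values after this change
).execute()
```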
 
@@ -982,7 +982,7 @@ 


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -1012,7 +1012,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -1115,7 +1115,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -1145,7 +1145,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -1254,7 +1254,7 @@


Updates inventory information for a Product while respecting the last update timestamps of each inventory field. This process is asynchronous and does not require the Product to exist before updating fulfillment information. If the request is valid, the update will be enqueued and processed downstream. As a consequence, when a response is returned, updates are not immediately manifested in the Product queried by GetProduct or ListProducts. When inventory is updated with CreateProduct and UpdateProduct, the specified inventory field value(s) will overwrite any existing value(s) while ignoring the last update time for this field. Furthermore, the last update time for the specified inventory fields will be overwritten to the time of the CreateProduct or UpdateProduct request. If no inventory fields are set in CreateProductRequest.product, then any pre-existing inventory information for this product will be used. If no inventory fields are set in UpdateProductRequest.set_mask, then any existing inventory information will be preserved. Pre-existing inventory information can only be updated with SetInventory, AddFulfillmentPlaces, and RemoveFulfillmentPlaces. This feature is only available for users who have Retail Search enabled. Please submit a form [here](https://cloud.google.com/contact) to contact cloud sales if you are interested in using Retail Search.
 
 Args:
-  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". (required)
+  name: string, Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. (required)
   body: object, The request body.
     The object takes the form of:
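The request body is likewise truncated here. A hedged sketch of a `setInventory` call; the `inventory` and `setMask` field names follow the v2beta `SetInventoryRequest` shape as best understood, and all IDs are placeholders:

```python
from googleapiclient.discovery import build

retail = build("retail", "v2beta")
name = ("projects/my-project/locations/global/catalogs/default_catalog"
        "/branches/0/products/sku-123")

# The call is asynchronous: the update is enqueued and is not immediately
# visible through GetProduct or ListProducts, as the description above notes.
op = (retail.projects().locations().catalogs().branches().products()
      .setInventory(
          name=name,
          body={
              "inventory": {"name": name, "availability": "IN_STOCK"},
              "setMask": "availability",  # only this inventory field is written
          },
      ).execute())
```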
 
@@ -1297,7 +1297,7 @@ 


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -1327,7 +1327,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html index 63ba4951283..ebccfc2e4d7 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html @@ -182,7 +182,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -212,7 +212,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -475,7 +475,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -505,7 +505,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html index e7a117b7c07..512939a1aa2 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html @@ -231,7 +231,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -261,7 +261,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -537,7 +537,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -567,7 +567,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], @@ -724,7 +724,7 @@


"colorFamilies": [ # The standard color families. Strongly recommended to use the following standard color groups: "Red", "Pink", "Orange", "Yellow", "Purple", "Green", "Cyan", "Blue", "Brown", "White", "Gray", "Black" and "Mixed". Normally it is expected to have only 1 color family. May consider using single "Mixed" instead of multiple values. A maximum of 5 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], - "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). + "colors": [ # The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single "Mixed" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color). "A String", ], }, @@ -754,7 +754,7 @@


"materials": [ # The material of the product. For example, "leather", "wooden". A maximum of 20 values are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [material](https://support.google.com/merchants/answer/6324410). Schema.org property [Product.material](https://schema.org/material). "A String", ], - "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be "default_branch". + "name": "A String", # Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. "patterns": [ # The pattern or graphic print of the product. For example, "striped", "polka dot", "paisley". A maximum of 20 values are allowed per Product. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [pattern](https://support.google.com/merchants/answer/6324483). Schema.org property [Product.pattern](https://schema.org/pattern). "A String", ], diff --git a/docs/dyn/servicenetworking_v1.services.html b/docs/dyn/servicenetworking_v1.services.html index b8d9933e349..f313640305e 100644 --- a/docs/dyn/servicenetworking_v1.services.html +++ b/docs/dyn/servicenetworking_v1.services.html @@ -132,6 +132,7 @@


"consumerNetwork": "A String", # Required. The name of the service consumer's VPC network. The network must have an existing private connection that was provisioned through the connections.create method. The name must be in the following format: `projects/{project}/global/networks/{network}`, where {project} is a project number, such as `12345`. {network} is the name of a VPC network in the project. "description": "A String", # Optional. Description of the subnet. "ipPrefixLength": 42, # Required. The prefix length of the subnet's IP address range. Use CIDR range notation, such as `30` to provision a subnet with an `x.x.x.x/30` CIDR range. The IP address range is drawn from a pool of available ranges in the service consumer's allocated range. + "outsideAllocationPublicIpRange": "A String", # Optional. Enable outside allocation using public IP addresses. Any public IP range may be specified. If this field is provided, we will not use customer reserved ranges for this primary IP range. "privateIpv6GoogleAccess": "A String", # Optional. The private IPv6 google access type for the VMs in this subnet. For information about the access types that can be set using this field, see [subnetwork](https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks) in the Compute API documentation. "region": "A String", # Required. The name of a [region](/compute/docs/regions-zones) for the subnet, such `europe-west1`. "requestedAddress": "A String", # Optional. The starting address of a range. The address must be a valid IPv4 address in the x.x.x.x format. This value combined with the IP prefix range is the CIDR range for the subnet. The range must be within the allocated range that is assigned to the private connection. If the CIDR range isn't available, the call fails. @@ -141,6 +142,7 @@


"secondaryIpRangeSpecs": [ # Optional. A list of secondary IP ranges to be created within the new subnetwork. { "ipPrefixLength": 42, # Required. The prefix length of the secondary IP range. Use CIDR range notation, such as `30` to provision a secondary IP range with an `x.x.x.x/30` CIDR range. The IP address range is drawn from a pool of available ranges in the service consumer's allocated range. + "outsideAllocationPublicIpRange": "A String", # Optional. Enable outside allocation using public IP addresses. Any public IP range may be specified. If this field is provided, we will not use customer reserved ranges for this secondary IP range. "rangeName": "A String", # Required. A name for the secondary IP range. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. "requestedAddress": "A String", # Optional. The starting address of a range. The address must be a valid IPv4 address in the x.x.x.x format. This value combined with the IP prefix range is the CIDR range for the secondary IP range. The range must be within the allocated range that is assigned to the private connection. If the CIDR range isn't available, the call fails. }, diff --git a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json index 0e7d59ff1e2..2d23da134dd 100644 --- a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json +++ b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json @@ -115,7 +115,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://acceleratedmobilepageurl.googleapis.com/", "schemas": { "AmpUrl": { diff --git a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json index 26ad192fb81..125e66bb9cf 100644 --- a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json @@ -2568,7 +2568,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://adexchangebuyer.googleapis.com/", "schemas": { "AbsoluteDateRange": { @@ -2835,7 +2835,7 @@ "type": "string" }, "clientName": { - "description": "Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty.", + "description": "Name used to represent this client to publishers. You may have multiple clients that map to the same entity, but for each client the combination of `clientName` and entity must be unique. You can specify this field as empty. 
Maximum length of 255 characters is allowed.", "type": "string" }, "entityId": { diff --git a/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json b/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json index e8964f6666b..8931593f482 100644 --- a/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json @@ -272,7 +272,7 @@ } } }, - "revision": "20210914", + "revision": "20210920", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Application": { diff --git a/googleapiclient/discovery_cache/documents/admin.directory_v1.json b/googleapiclient/discovery_cache/documents/admin.directory_v1.json index 46bedba768f..87b93955c50 100644 --- a/googleapiclient/discovery_cache/documents/admin.directory_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.directory_v1.json @@ -4397,7 +4397,7 @@ } } }, - "revision": "20210914", + "revision": "20210920", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Alias": { diff --git a/googleapiclient/discovery_cache/documents/admin.reports_v1.json b/googleapiclient/discovery_cache/documents/admin.reports_v1.json index 0da8099d719..02d744b23fc 100644 --- a/googleapiclient/discovery_cache/documents/admin.reports_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.reports_v1.json @@ -631,7 +631,7 @@ } } }, - "revision": "20210914", + "revision": "20210920", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Activities": { diff --git a/googleapiclient/discovery_cache/documents/admob.v1.json b/googleapiclient/discovery_cache/documents/admob.v1.json index a5cca3ea2ba..81d7a668036 100644 --- a/googleapiclient/discovery_cache/documents/admob.v1.json +++ b/googleapiclient/discovery_cache/documents/admob.v1.json @@ -321,7 +321,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://admob.googleapis.com/", "schemas": { "AdUnit": { diff --git a/googleapiclient/discovery_cache/documents/admob.v1beta.json b/googleapiclient/discovery_cache/documents/admob.v1beta.json index 6ee3b30ac3c..d81c570e7aa 100644 --- a/googleapiclient/discovery_cache/documents/admob.v1beta.json +++ b/googleapiclient/discovery_cache/documents/admob.v1beta.json @@ -321,7 +321,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://admob.googleapis.com/", "schemas": { "AdUnit": { diff --git a/googleapiclient/discovery_cache/documents/adsense.v2.json b/googleapiclient/discovery_cache/documents/adsense.v2.json index cc97ccebafe..abe64e70add 100644 --- a/googleapiclient/discovery_cache/documents/adsense.v2.json +++ b/googleapiclient/discovery_cache/documents/adsense.v2.json @@ -1567,7 +1567,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://adsense.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json index 332830c1be3..1e2fdbda002 100644 --- a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json @@ -423,7 +423,7 @@ } } }, - "revision": "20210907", + "revision": "20210921", "rootUrl": "https://alertcenter.googleapis.com/", "schemas": { "AccountSuspensionDetails": { diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json index 
8e9ede4dfc6..58509fc8585 100644 --- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json @@ -3152,7 +3152,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://analyticsadmin.googleapis.com/", "schemas": { "GoogleAnalyticsAdminV1alphaAccount": { @@ -4464,6 +4464,10 @@ "description": "A resource message representing a Google Analytics GA4 property.", "id": "GoogleAnalyticsAdminV1alphaProperty", "properties": { + "account": { + "description": "Immutable. The resource name of the parent account Format: accounts/{account_id} Example: \"accounts/123\"", + "type": "string" + }, "createTime": { "description": "Output only. Time when the entity was originally created.", "format": "google-datetime", diff --git a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json index f52414d25eb..81a8553edc8 100644 --- a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json +++ b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json @@ -313,7 +313,7 @@ } } }, - "revision": "20210917", + "revision": "20210924", "rootUrl": "https://analyticsdata.googleapis.com/", "schemas": { "BatchRunPivotReportsRequest": { diff --git a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json index 69a9aa8d40a..8a348c04d93 100644 --- a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json +++ b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json @@ -825,7 +825,7 @@ } } }, - "revision": "20210914", + "revision": "20210924", "rootUrl": "https://androiddeviceprovisioning.googleapis.com/", "schemas": { "ClaimDeviceRequest": { diff --git a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json index c1a7f034fe4..131ef9fbaa7 100644 --- a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json +++ b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json @@ -2610,7 +2610,7 @@ } } }, - "revision": "20210916", + "revision": "20210923", "rootUrl": "https://androidenterprise.googleapis.com/", "schemas": { "Administrator": { diff --git a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json index 7ab3174f2bd..60b7573c024 100644 --- a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json +++ b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json @@ -2681,7 +2681,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://androidpublisher.googleapis.com/", "schemas": { "Apk": { diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1.json b/googleapiclient/discovery_cache/documents/apigateway.v1.json index 184bdc32d1d..dc2bc7fa329 100644 --- a/googleapiclient/discovery_cache/documents/apigateway.v1.json +++ b/googleapiclient/discovery_cache/documents/apigateway.v1.json @@ -1083,7 +1083,7 @@ } } }, - "revision": "20210908", + "revision": "20210915", "rootUrl": "https://apigateway.googleapis.com/", "schemas": { "ApigatewayApi": { diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json index e4f65c4292f..90166bee1d1 100644 --- 
a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json +++ b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json @@ -1083,7 +1083,7 @@ } } }, - "revision": "20210908", + "revision": "20210915", "rootUrl": "https://apigateway.googleapis.com/", "schemas": { "ApigatewayApi": { diff --git a/googleapiclient/discovery_cache/documents/apigee.v1.json b/googleapiclient/discovery_cache/documents/apigee.v1.json index 7cda74ffc8c..3821ddc6a62 100644 --- a/googleapiclient/discovery_cache/documents/apigee.v1.json +++ b/googleapiclient/discovery_cache/documents/apigee.v1.json @@ -10978,6 +10978,7 @@ "type": "string" }, "displayName": { + "description": "Display name for the Apigee organization. Unused, but reserved for future use.", "type": "string" }, "environments": { @@ -11443,7 +11444,7 @@ "type": "string" }, "paymentFundingModel": { - "description": "Flag that specifies the billing account type, prepaid or postpaid.", + "description": "DEPRECATED: This field is no longer supported and will eventually be removed when Apigee Hybrid 1.5/1.6 is no longer supported. Instead, use the `billingType` field inside `DeveloperMonetizationConfig` resource. Flag that specifies the billing account type, prepaid or postpaid.", "enum": [ "PAYMENT_FUNDING_MODEL_UNSPECIFIED", "PREPAID", diff --git a/googleapiclient/discovery_cache/documents/apikeys.v2.json b/googleapiclient/discovery_cache/documents/apikeys.v2.json index 8bbb17e5cf8..54d794af24a 100644 --- a/googleapiclient/discovery_cache/documents/apikeys.v2.json +++ b/googleapiclient/discovery_cache/documents/apikeys.v2.json @@ -424,7 +424,7 @@ } } }, - "revision": "20210910", + "revision": "20210924", "rootUrl": "https://apikeys.googleapis.com/", "schemas": { "Operation": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1.json b/googleapiclient/discovery_cache/documents/appengine.v1.json index 950f12fb8a1..d713be253eb 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1.json @@ -1595,7 +1595,7 @@ } } }, - "revision": "20210911", + "revision": "20210918", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { @@ -3305,6 +3305,13 @@ "description": "Relative name of the service within the application. Example: default.@OutputOnly", "type": "string" }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., \"env=prod\", \"env=qa\"). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels.", + "type": "object" + }, "name": { "description": "Full path to the Service resource in the API. 
Example: apps/myapp/services/default.@OutputOnly", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json index 11249a9f59b..4d071d1f0f2 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json @@ -709,7 +709,7 @@ } } }, - "revision": "20210911", + "revision": "20210918", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "AuthorizedCertificate": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1beta.json b/googleapiclient/discovery_cache/documents/appengine.v1beta.json index 33ad1965386..576ef4c59f1 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1beta.json @@ -1595,7 +1595,7 @@ } } }, - "revision": "20210911", + "revision": "20210918", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { @@ -3368,6 +3368,13 @@ "description": "Relative name of the service within the application. Example: default.@OutputOnly", "type": "string" }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "A set of labels to apply to this service. Labels are key/value pairs that describe the service and all resources that belong to it (e.g., versions). The labels can be used to search and group resources, and are propagated to the usage and billing reports, enabling fine-grain analysis of costs. An example of using labels is to tag resources belonging to different environments (e.g., \"env=prod\", \"env=qa\"). Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores, dashes, and international characters. Label keys must start with a lowercase letter or an international character. Each service can have at most 32 labels.", + "type": "object" + }, "name": { "description": "Full path to the Service resource in the API. Example: apps/myapp/services/default.@OutputOnly", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json index 7f6ff319fcf..955b05a1637 100644 --- a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json @@ -586,7 +586,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://area120tables.googleapis.com/", "schemas": { "BatchCreateRowsRequest": { @@ -690,6 +690,10 @@ "description": "column name", "type": "string" }, + "readonly": { + "description": "Optional. Indicates that values for the column cannot be set by the user.", + "type": "boolean" + }, "relationshipDetails": { "$ref": "RelationshipDetails", "description": "Optional. Additional details about a relationship column. Specified when data_type is relationship." 
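Note: the `readonly` flag just added to the Area 120 Tables `ColumnDescription` schema is surfaced through the generated Python client. A minimal sketch of reading it, assuming Application Default Credentials and a placeholder table ID:

```python
# Sketch only: list the columns of a table whose values cannot be set by the
# user, via the new `readonly` column property. The table ID below is a
# placeholder; credentials are resolved from Application Default Credentials.
from googleapiclient.discovery import build

service = build("area120tables", "v1alpha1")
table = service.tables().get(name="tables/REPLACE_WITH_TABLE_ID").execute()

for column in table.get("columns", []):
    if column.get("readonly"):
        print("read-only column:", column["name"])
```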
diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json index c1f3e03b5a3..a9af0fddd1e 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json @@ -352,7 +352,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "AptArtifact": { @@ -673,7 +673,6 @@ "DOCKER", "MAVEN", "NPM", - "PYPI", "APT", "YUM", "PYTHON" @@ -683,7 +682,6 @@ "Docker package format.", "Maven package format.", "NPM package format.", - "PyPI package format.", "APT package format.", "YUM package format.", "Python package format." diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json index 3d77e0c82d2..a1638041955 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json @@ -929,7 +929,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "AptArtifact": { @@ -1456,7 +1456,7 @@ "Docker package format.", "Maven package format.", "NPM package format.", - "PyPI package format.", + "PyPI package format. Deprecated, use PYTHON instead.", "APT package format.", "YUM package format.", "Python package format." diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json index b776df5d47e..ddb90bc14fb 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json @@ -1081,7 +1081,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "AptArtifact": { @@ -1630,7 +1630,7 @@ "Docker package format.", "Maven package format.", "NPM package format.", - "PyPI package format.", + "PyPI package format. Deprecated, use PYTHON instead.", "APT package format.", "YUM package format.", "Python package format." diff --git a/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json b/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json index 6227efd7483..064c575b7ba 100644 --- a/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json +++ b/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json @@ -351,7 +351,7 @@ } } }, - "revision": "20210910", + "revision": "20210916", "rootUrl": "https://assuredworkloads.googleapis.com/", "schemas": { "GoogleCloudAssuredworkloadsV1CreateWorkloadOperationMetadata": { @@ -425,7 +425,7 @@ "id": "GoogleCloudAssuredworkloadsV1Workload", "properties": { "billingAccount": { - "description": "Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.", + "description": "Optional. The billing account used for the resources which are direct children of workload. 
This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.", "type": "string" }, "complianceRegime": { diff --git a/googleapiclient/discovery_cache/documents/bigquery.v2.json b/googleapiclient/discovery_cache/documents/bigquery.v2.json index 021c5ec5f7c..d5838923c13 100644 --- a/googleapiclient/discovery_cache/documents/bigquery.v2.json +++ b/googleapiclient/discovery_cache/documents/bigquery.v2.json @@ -338,7 +338,7 @@ ] }, "delete": { - "description": "Requests that a job is deleted. This call will return when the job is deleted. This method is available in limited preview.", + "description": "Requests the deletion of the metadata of a job. This call returns when the job's metadata is deleted.", "flatPath": "projects/{projectsId}/jobs/{jobsId}/delete", "httpMethod": "DELETE", "id": "bigquery.jobs.delete", @@ -348,7 +348,7 @@ ], "parameters": { "jobId": { - "description": "Required. Job ID of the job to be deleted. If this is a parent job which has child jobs, all child jobs will be deleted as well. Deletion of child jobs directly is not allowed.", + "description": "Required. Job ID of the job for which metadata is to be deleted. If this is a parent job which has child jobs, the metadata from all child jobs will be deleted as well. Direct deletion of the metadata of child jobs is not allowed.", "location": "path", "pattern": "^[^/]+$", "required": true, @@ -360,7 +360,7 @@ "type": "string" }, "projectId": { - "description": "Required. Project ID of the job to be deleted.", + "description": "Required. Project ID of the job for which metadata is to be deleted.", "location": "path", "pattern": "^[^/]+$", "required": true, @@ -1683,7 +1683,7 @@ } } }, - "revision": "20210906", + "revision": "20210919", "rootUrl": "https://bigquery.googleapis.com/", "schemas": { "AggregateClassificationMetrics": { @@ -1868,7 +1868,7 @@ "YEARLY" ], "enumDescriptions": [ - "", + "Unspecified seasonal period.", "No seasonality", "Daily period, 24 hours.", "Weekly period, 7 days.", @@ -1935,7 +1935,7 @@ "YEARLY" ], "enumDescriptions": [ - "", + "Unspecified seasonal period.", "No seasonality", "Daily period, 24 hours.", "Weekly period, 7 days.", @@ -2007,7 +2007,7 @@ "YEARLY" ], "enumDescriptions": [ - "", + "Unspecified seasonal period.", "No seasonality", "Daily period, 24 hours.", "Weekly period, 7 days.", @@ -2063,7 +2063,7 @@ "YEARLY" ], "enumDescriptions": [ - "", + "Unspecified seasonal period.", "No seasonality", "Daily period, 24 hours.", "Weekly period, 7 days.", @@ -6087,10 +6087,53 @@ "format": "int64", "type": "string" }, + "boosterType": { + "description": "Booster type for boosted tree models.", + "enum": [ + "BOOSTER_TYPE_UNSPECIFIED", + "GBTREE", + "DART" + ], + "enumDescriptions": [ + "Unspecified booster type.", + "Gbtree booster.", + "Dart booster." 
+ ], + "type": "string" + }, "cleanSpikesAndDips": { "description": "If true, clean spikes and dips in the input time series.", "type": "boolean" }, + "colsampleBylevel": { + "description": "Subsample ratio of columns for each level for boosted tree models.", + "format": "double", + "type": "number" + }, + "colsampleBynode": { + "description": "Subsample ratio of columns for each node (split) for boosted tree models.", + "format": "double", + "type": "number" + }, + "colsampleBytree": { + "description": "Subsample ratio of columns when constructing each tree for boosted tree models.", + "format": "double", + "type": "number" + }, + "dartNormalizeType": { + "description": "Type of normalization algorithm for boosted tree models using dart booster.", + "enum": [ + "DART_NORMALIZE_TYPE_UNSPECIFIED", + "TREE", + "FOREST" + ], + "enumDescriptions": [ + "Unspecified dart normalize type.", + "New trees have the same weight as each of the dropped trees.", + "New trees have the same weight as the sum of the dropped trees." + ], + "type": "string" + }, "dataFrequency": { "description": "The data frequency of a time series.", "enum": [ @@ -6457,6 +6500,11 @@ "format": "double", "type": "number" }, + "minTreeChildWeight": { + "description": "Minimum sum of instance weight needed in a child for boosted tree models.", + "format": "int64", + "type": "string" + }, "modelUri": { "description": "Google Cloud Storage URI from which the model was imported. Only applicable for imported models.", "type": "string" @@ -6475,6 +6523,11 @@ "format": "int64", "type": "string" }, + "numParallelTree": { + "description": "Number of parallel trees constructed during each iteration for boosted tree models.", + "format": "int64", + "type": "string" + }, "optimizationStrategy": { "description": "Optimization strategy for training linear regression models.", "enum": [ @@ -6517,6 +6570,24 @@ "description": "Column to be designated as time series timestamp for ARIMA model.", "type": "string" }, + "treeMethod": { + "description": "Tree construction algorithm for boosted tree models.", + "enum": [ + "TREE_METHOD_UNSPECIFIED", + "AUTO", + "EXACT", + "APPROX", + "HIST" + ], + "enumDescriptions": [ + "Unspecified tree method.", + "Use heuristic to choose the fastest method.", + "Exact greedy algorithm.", + "Approximate greedy algorithm using quantile sketch and gradient histogram.", + "Fast histogram optimized approximate greedy algorithm."
+ ], + "type": "string" + }, "userColumn": { "description": "User column specified for matrix factorization models.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json index a4f29491cd3..d541abfedde 100644 --- a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json @@ -395,7 +395,7 @@ } } }, - "revision": "20210906", + "revision": "20210918", "rootUrl": "https://bigqueryconnection.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json index fb099b2fdf1..c8e1cefb253 100644 --- a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json +++ b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json @@ -1340,7 +1340,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://bigquerydatatransfer.googleapis.com/", "schemas": { "CheckValidCredsRequest": { diff --git a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json index 6d1b8c63fa4..20a8cfd101a 100644 --- a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json +++ b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json @@ -788,7 +788,7 @@ } } }, - "revision": "20210910", + "revision": "20210916", "rootUrl": "https://bigqueryreservation.googleapis.com/", "schemas": { "Assignment": { diff --git a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1beta1.json b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1beta1.json index dadae7f09ad..07fd0284f4c 100644 --- a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1beta1.json @@ -786,7 +786,7 @@ } } }, - "revision": "20210910", + "revision": "20210916", "rootUrl": "https://bigqueryreservation.googleapis.com/", "schemas": { "Assignment": { diff --git a/googleapiclient/discovery_cache/documents/billingbudgets.v1.json b/googleapiclient/discovery_cache/documents/billingbudgets.v1.json index f0d6ca5eb39..bac88dabedd 100644 --- a/googleapiclient/discovery_cache/documents/billingbudgets.v1.json +++ b/googleapiclient/discovery_cache/documents/billingbudgets.v1.json @@ -270,7 +270,7 @@ } } }, - "revision": "20210910", + "revision": "20210924", "rootUrl": "https://billingbudgets.googleapis.com/", "schemas": { "GoogleCloudBillingBudgetsV1Budget": { diff --git a/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json b/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json index b890e239c86..4b3b85beca6 100644 --- a/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json @@ -264,7 +264,7 @@ } } }, - "revision": "20210910", + "revision": "20210924", "rootUrl": "https://billingbudgets.googleapis.com/", "schemas": { "GoogleCloudBillingBudgetsV1beta1AllUpdatesRule": { diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json index dea596abcee..d79530be980 100644 --- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json 
+++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json @@ -551,7 +551,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://binaryauthorization.googleapis.com/", "schemas": { "AdmissionRule": { diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json index 22a20db12b1..e870bc49aea 100644 --- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json @@ -551,7 +551,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://binaryauthorization.googleapis.com/", "schemas": { "AdmissionRule": { diff --git a/googleapiclient/discovery_cache/documents/blogger.v2.json b/googleapiclient/discovery_cache/documents/blogger.v2.json index 029165400a7..12ae33260a9 100644 --- a/googleapiclient/discovery_cache/documents/blogger.v2.json +++ b/googleapiclient/discovery_cache/documents/blogger.v2.json @@ -401,7 +401,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://blogger.googleapis.com/", "schemas": { "Blog": { diff --git a/googleapiclient/discovery_cache/documents/blogger.v3.json b/googleapiclient/discovery_cache/documents/blogger.v3.json index 931def836ce..4d61c0773e1 100644 --- a/googleapiclient/discovery_cache/documents/blogger.v3.json +++ b/googleapiclient/discovery_cache/documents/blogger.v3.json @@ -1678,7 +1678,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://blogger.googleapis.com/", "schemas": { "Blog": { diff --git a/googleapiclient/discovery_cache/documents/books.v1.json b/googleapiclient/discovery_cache/documents/books.v1.json index 5d111a28e8f..b2ac2ef14ee 100644 --- a/googleapiclient/discovery_cache/documents/books.v1.json +++ b/googleapiclient/discovery_cache/documents/books.v1.json @@ -2671,7 +2671,7 @@ } } }, - "revision": "20210912", + "revision": "20210920", "rootUrl": "https://books.googleapis.com/", "schemas": { "Annotation": { diff --git a/googleapiclient/discovery_cache/documents/calendar.v3.json b/googleapiclient/discovery_cache/documents/calendar.v3.json index 7b61eb19bfe..d38eda0f287 100644 --- a/googleapiclient/discovery_cache/documents/calendar.v3.json +++ b/googleapiclient/discovery_cache/documents/calendar.v3.json @@ -1723,7 +1723,7 @@ } } }, - "revision": "20210913", + "revision": "20210924", "rootUrl": "https://www.googleapis.com/", "schemas": { "Acl": { diff --git a/googleapiclient/discovery_cache/documents/chat.v1.json b/googleapiclient/discovery_cache/documents/chat.v1.json index e49c7dfb53c..87d07ad4cd2 100644 --- a/googleapiclient/discovery_cache/documents/chat.v1.json +++ b/googleapiclient/discovery_cache/documents/chat.v1.json @@ -601,7 +601,7 @@ } } }, - "revision": "20210911", + "revision": "20210918", "rootUrl": "https://chat.googleapis.com/", "schemas": { "ActionParameter": { diff --git a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json index 664a354e797..3599a5465eb 100644 --- a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json @@ -382,7 +382,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://chromemanagement.googleapis.com/", "schemas": { "GoogleChromeManagementV1AndroidAppInfo": { diff --git 
a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json index e573bab0c65..15bb7ac6bb0 100644 --- a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json +++ b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json @@ -324,7 +324,7 @@ } } }, - "revision": "20210917", + "revision": "20210924", "rootUrl": "https://chromepolicy.googleapis.com/", "schemas": { "GoogleChromePolicyV1AdditionalTargetKeyName": { diff --git a/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json b/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json index f42460af853..c10244f1e13 100644 --- a/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json +++ b/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json @@ -116,7 +116,7 @@ } } }, - "revision": "20210916", + "revision": "20210923", "rootUrl": "https://chromeuxreport.googleapis.com/", "schemas": { "Bin": { diff --git a/googleapiclient/discovery_cache/documents/classroom.v1.json b/googleapiclient/discovery_cache/documents/classroom.v1.json index 835de9bc5b0..9b177555c7c 100644 --- a/googleapiclient/discovery_cache/documents/classroom.v1.json +++ b/googleapiclient/discovery_cache/documents/classroom.v1.json @@ -2400,7 +2400,7 @@ } } }, - "revision": "20210920", + "revision": "20210922", "rootUrl": "https://classroom.googleapis.com/", "schemas": { "Announcement": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1.json index 2f150529e01..8e5e53c9c72 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1.json @@ -727,7 +727,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AccessSelector": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json index 8924250f315..34ce2ee10cc 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json @@ -411,7 +411,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json index 6a929294a24..ddceaed878e 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json @@ -207,7 +207,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json index 6b638efcd2e..60aa753504d 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p4beta1.json @@ -221,7 +221,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AccessSelector": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json 
b/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json index de63cfb5792..aa18213fc94 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json @@ -177,7 +177,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json index 45d111d26f4..ed407f40638 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json @@ -167,7 +167,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudbilling.v1.json b/googleapiclient/discovery_cache/documents/cloudbilling.v1.json index 41887706536..929c52e3982 100644 --- a/googleapiclient/discovery_cache/documents/cloudbilling.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudbilling.v1.json @@ -521,7 +521,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://cloudbilling.googleapis.com/", "schemas": { "AggregationInfo": { diff --git a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json index 2e889f48a86..6eaf2852100 100644 --- a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json @@ -1589,7 +1589,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://cloudchannel.googleapis.com/", "schemas": { "GoogleCloudChannelV1ActivateEntitlementRequest": { diff --git a/googleapiclient/discovery_cache/documents/clouddebugger.v2.json b/googleapiclient/discovery_cache/documents/clouddebugger.v2.json index 774d618908d..f16deab7c35 100644 --- a/googleapiclient/discovery_cache/documents/clouddebugger.v2.json +++ b/googleapiclient/discovery_cache/documents/clouddebugger.v2.json @@ -448,7 +448,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://clouddebugger.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json b/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json index d5f9688568d..08d606f63d1 100644 --- a/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json @@ -430,7 +430,7 @@ } } }, - "revision": "20210826", + "revision": "20210916", "rootUrl": "https://clouderrorreporting.googleapis.com/", "schemas": { "DeleteEventsResponse": { @@ -676,7 +676,7 @@ "description": "Optional. A description of the context in which the error occurred." }, "eventTime": { - "description": "Optional. Time when the event occurred. If not provided, the time when the event was received by the Error Reporting system will be used.", + "description": "Optional. Time when the event occurred. If not provided, the time when the event was received by the Error Reporting system is used. 
If provided, the time must not exceed the [logs retention period](https://cloud.google.com/logging/quotas#logs_retention_periods) in the past, or be more than 24 hours in the future. If an invalid time is provided, then an error is returned.", "format": "google-datetime", "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json index f2cc7708461..b2b8e4a960d 100644 --- a/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json @@ -546,7 +546,7 @@ } } }, - "revision": "20210907", + "revision": "20210916", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1.json index 15b834ee488..9323bdce7dc 100644 --- a/googleapiclient/discovery_cache/documents/cloudidentity.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1.json @@ -1273,7 +1273,7 @@ } } }, - "revision": "20210913", + "revision": "20210920", "rootUrl": "https://cloudidentity.googleapis.com/", "schemas": { "CheckTransitiveMembershipResponse": { @@ -1335,7 +1335,7 @@ "id": "DynamicGroupQuery", "properties": { "query": { - "description": "Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')`", + "description": "Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')`", "type": "string" }, "resourceType": { diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json index fe419da32b6..8c1e6d62f62 100644 --- a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json @@ -1336,7 +1336,7 @@ } } }, - "revision": "20210913", + "revision": "20210920", "rootUrl": "https://cloudidentity.googleapis.com/", "schemas": { "AndroidAttributes": { @@ -1943,7 +1943,7 @@ "id": "DynamicGroupQuery", "properties": { "query": { - "description": "Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')`", + "description": "Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. 
`user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')`", "type": "string" }, "resourceType": { diff --git a/googleapiclient/discovery_cache/documents/cloudiot.v1.json b/googleapiclient/discovery_cache/documents/cloudiot.v1.json index 57ed2b47434..e4f221b81c8 100644 --- a/googleapiclient/discovery_cache/documents/cloudiot.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudiot.v1.json @@ -938,7 +938,7 @@ } } }, - "revision": "20210906", + "revision": "20210913", "rootUrl": "https://cloudiot.googleapis.com/", "schemas": { "BindDeviceToGatewayRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudkms.v1.json b/googleapiclient/discovery_cache/documents/cloudkms.v1.json index 7dbc5e06c76..eed87b9cf3e 100644 --- a/googleapiclient/discovery_cache/documents/cloudkms.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudkms.v1.json @@ -1346,7 +1346,7 @@ } } }, - "revision": "20210906", + "revision": "20210921", "rootUrl": "https://cloudkms.googleapis.com/", "schemas": { "AsymmetricDecryptRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json index d7486213a3d..cb5ff922b20 100644 --- a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json @@ -216,7 +216,7 @@ } } }, - "revision": "20210910", + "revision": "20210919", "rootUrl": "https://cloudprofiler.googleapis.com/", "schemas": { "CreateProfileRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json index 8fc4a6676af..ff73093c64e 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json @@ -1171,7 +1171,7 @@ } } }, - "revision": "20210905", + "revision": "20210917", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json index 8934404a7fb..660f902f379 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json @@ -566,7 +566,7 @@ } } }, - "revision": "20210905", + "revision": "20210917", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json index f1d7f539d3c..7f53ee76ba4 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json @@ -450,7 +450,7 @@ } } }, - "revision": "20210905", + "revision": "20210917", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json 
b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json index 42b546d6e75..4416b882c44 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json @@ -450,7 +450,7 @@ } } }, - "revision": "20210905", + "revision": "20210917", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json index 45f8f602548..4aa7fec846e 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json @@ -1612,7 +1612,7 @@ } } }, - "revision": "20210905", + "revision": "20210917", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json b/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json index 6903a36dfd3..f410eac4b20 100644 --- a/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json @@ -418,7 +418,7 @@ } } }, - "revision": "20210827", + "revision": "20210914", "rootUrl": "https://cloudscheduler.googleapis.com/", "schemas": { "AppEngineHttpTarget": { diff --git a/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json index bf3416ac6da..5dcb9267bd7 100644 --- a/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json @@ -433,7 +433,7 @@ } } }, - "revision": "20210827", + "revision": "20210914", "rootUrl": "https://cloudscheduler.googleapis.com/", "schemas": { "AppEngineHttpTarget": { diff --git a/googleapiclient/discovery_cache/documents/cloudshell.v1.json b/googleapiclient/discovery_cache/documents/cloudshell.v1.json index 2327cdc6f43..08b6888b29b 100644 --- a/googleapiclient/discovery_cache/documents/cloudshell.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudshell.v1.json @@ -374,7 +374,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://cloudshell.googleapis.com/", "schemas": { "AddPublicKeyMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json index f078e14af52..9bc6c130bc3 100644 --- a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json +++ b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json @@ -809,7 +809,7 @@ } } }, - "revision": "20210830", + "revision": "20210920", "rootUrl": "https://cloudtasks.googleapis.com/", "schemas": { "AcknowledgeTaskRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json index 9fbcfe4a904..19642242ef9 100644 --- a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json +++ b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json @@ -697,7 +697,7 @@ } } }, - "revision": "20210830", + "revision": "20210920", "rootUrl": "https://cloudtasks.googleapis.com/", "schemas": { "AppEngineHttpQueue": { diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v1.json 
b/googleapiclient/discovery_cache/documents/cloudtrace.v1.json index 6745d4e3d80..3dfe5365e05 100644 --- a/googleapiclient/discovery_cache/documents/cloudtrace.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudtrace.v1.json @@ -257,7 +257,7 @@ } } }, - "revision": "20210827", + "revision": "20210917", "rootUrl": "https://cloudtrace.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v2.json b/googleapiclient/discovery_cache/documents/cloudtrace.v2.json index 5ea0fee344d..44153725fb8 100644 --- a/googleapiclient/discovery_cache/documents/cloudtrace.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudtrace.v2.json @@ -181,7 +181,7 @@ } } }, - "revision": "20210827", + "revision": "20210917", "rootUrl": "https://cloudtrace.googleapis.com/", "schemas": { "Annotation": { diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json b/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json index 7ade4489145..53c03720b32 100644 --- a/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json @@ -273,7 +273,7 @@ } } }, - "revision": "20210827", + "revision": "20210917", "rootUrl": "https://cloudtrace.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/composer.v1.json b/googleapiclient/discovery_cache/documents/composer.v1.json index b628aac9700..392bf3c1358 100644 --- a/googleapiclient/discovery_cache/documents/composer.v1.json +++ b/googleapiclient/discovery_cache/documents/composer.v1.json @@ -242,7 +242,7 @@ "type": "string" }, "updateMask": { - "description": "Required. A comma-separated list of paths, relative to `Environment`, of fields to update. For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of numpy, the `updateMask` parameter would include the following two `paths` values: \"config.softwareConfig.pypiPackages.scikit-learn\" and \"config.softwareConfig.pypiPackages.numpy\". The included patch environment would specify the scikit-learn version as follows: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"scikit-learn\":\"==0.19.0\" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and numpy will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels \"label1\" and \"label2\" while clearing \"label3\" (assuming it already exists), one can provide the paths \"labels.label1\", \"labels.label2\", and \"labels.label3\" and populate the patch environment as follows: { \"labels\":{ \"label1\":\"new-label1-value\" \"label2\":\"new-label2-value\" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. 
For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path \"config.softwareConfig.pypiPackages\", and the patch environment would be the following: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"botocore\":\"==1.7.14\" } } } } **Note:** Only the following fields can be updated: * `config.softwareConfig.pypiPackages` * Replace all custom custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * `config.softwareConfig.pypiPackages.`packagename * Update the custom PyPI package *packagename*, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the `config.softwareConfig.pypiPackages` mask. * `labels` * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * `labels.`labelName * Set the label named *labelName*, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the `labels` mask. * `config.nodeCount` * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. * `config.webServerNetworkAccessControl` * Replace the environment's current `WebServerNetworkAccessControl`. * `config.databaseConfig` * Replace the environment's current `DatabaseConfig`. * `config.webServerConfig` * Replace the environment's current `WebServerConfig`. * `config.softwareConfig.airflowConfigOverrides` * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * `config.softwareConfig.airflowConfigOverrides.`section-name * Override the Apache Airflow config property *name* in the section named *section*, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the `config.softwareConfig.airflowConfigOverrides` mask. * `config.softwareConfig.envVariables` * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * `config.softwareConfig.schedulerCount` * Horizontally scale the number of schedulers in Airflow. A positive integer not greater than the number of nodes must be provided in the `config.softwareConfig.schedulerCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*.", + "description": "Required. A comma-separated list of paths, relative to `Environment`, of fields to update. 
For example, to set the version of scikit-learn to install in the environment to 0.19.0 and to remove an existing installation of numpy, the `updateMask` parameter would include the following two `paths` values: \"config.softwareConfig.pypiPackages.scikit-learn\" and \"config.softwareConfig.pypiPackages.numpy\". The included patch environment would specify the scikit-learn version as follows: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"scikit-learn\":\"==0.19.0\" } } } } Note that in the above example, any existing PyPI packages other than scikit-learn and numpy will be unaffected. Only one update type may be included in a single request's `updateMask`. For example, one cannot update both the PyPI packages and labels in the same request. However, it is possible to update multiple members of a map field simultaneously in the same request. For example, to set the labels \"label1\" and \"label2\" while clearing \"label3\" (assuming it already exists), one can provide the paths \"labels.label1\", \"labels.label2\", and \"labels.label3\" and populate the patch environment as follows: { \"labels\":{ \"label1\":\"new-label1-value\" \"label2\":\"new-label2-value\" } } Note that in the above example, any existing labels that are not included in the `updateMask` will be unaffected. It is also possible to replace an entire map field by providing the map field's path in the `updateMask`. The new value of the field will be that which is provided in the patch environment. For example, to delete all pre-existing user-specified PyPI packages and install botocore at version 1.7.14, the `updateMask` would contain the path \"config.softwareConfig.pypiPackages\", and the patch environment would be the following: { \"config\":{ \"softwareConfig\":{ \"pypiPackages\":{ \"botocore\":\"==1.7.14\" } } } } **Note:** Only the following fields can be updated: * `config.softwareConfig.pypiPackages` * Replace all custom PyPI packages. If a replacement package map is not included in `environment`, all custom PyPI packages are cleared. It is an error to provide both this mask and a mask specifying an individual package. * `config.softwareConfig.pypiPackages.`packagename * Update the custom PyPI package *packagename*, preserving other packages. To delete the package, include it in `updateMask`, and omit the mapping for it in `environment.config.softwareConfig.pypiPackages`. It is an error to provide both a mask of this form and the `config.softwareConfig.pypiPackages` mask. * `labels` * Replace all environment labels. If a replacement labels map is not included in `environment`, all labels are cleared. It is an error to provide both this mask and a mask specifying one or more individual labels. * `labels.`labelName * Set the label named *labelName*, while preserving other labels. To delete the label, include it in `updateMask` and omit its mapping in `environment.labels`. It is an error to provide both a mask of this form and the `labels` mask. * `config.nodeCount` * Horizontally scale the number of nodes in the environment. An integer greater than or equal to 3 must be provided in the `config.nodeCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.webServerNetworkAccessControl` * Replace the environment's current `WebServerNetworkAccessControl`. * `config.databaseConfig` * Replace the environment's current `DatabaseConfig`. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.
* `config.webServerConfig.machineType` * Machine type on which Airflow web server is running. It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. * `config.softwareConfig.airflowConfigOverrides` * Replace all Apache Airflow config overrides. If a replacement config overrides map is not included in `environment`, all config overrides are cleared. It is an error to provide both this mask and a mask specifying one or more individual config overrides. * `config.softwareConfig.airflowConfigOverrides.`section-name * Override the Apache Airflow config property *name* in the section named *section*, preserving other properties. To delete the property override, include it in `updateMask` and omit its mapping in `environment.config.softwareConfig.airflowConfigOverrides`. It is an error to provide both a mask of this form and the `config.softwareConfig.airflowConfigOverrides` mask. * `config.softwareConfig.envVariables` * Replace all environment variables. If a replacement environment variable map is not included in `environment`, all custom environment variables are cleared. It is an error to provide both this mask and a mask specifying one or more individual environment variables. * `config.softwareConfig.schedulerCount` * Horizontally scale the number of schedulers in Airflow. A positive integer not greater than the number of nodes must be provided in the `config.softwareConfig.schedulerCount` field. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*. * `config.databaseConfig.machineType` * Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -406,7 +406,7 @@ } } }, - "revision": "20210913", + "revision": "20210922", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AllowedIpRange": { @@ -468,7 +468,7 @@ "type": "object" }, "DatabaseConfig": { - "description": "The configuration of Cloud SQL instance that is used by the Apache Airflow software.", + "description": "The configuration of Cloud SQL instance that is used by the Apache Airflow software. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "id": "DatabaseConfig", "properties": { "machineType": { @@ -507,7 +507,7 @@ "type": "object" }, "EncryptionConfig": { - "description": "The encryption options for the Cloud Composer environment and its dependencies.", + "description": "The encryption options for the Cloud Composer environment and its dependencies. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "id": "EncryptionConfig", "properties": { "kmsKeyName": { @@ -587,11 +587,11 @@ }, "databaseConfig": { "$ref": "DatabaseConfig", - "description": "Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software." + "description": "Optional. The configuration settings for Cloud SQL instance used internally by Apache Airflow software.
This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*." }, "encryptionConfig": { "$ref": "EncryptionConfig", - "description": "Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated." + "description": "Optional. The encryption options for the Cloud Composer environment and its dependencies. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*." }, "gkeCluster": { "description": "Output only. The Kubernetes Engine cluster used to run this environment.", @@ -602,7 +602,7 @@ "description": "The configuration used for the Kubernetes Engine cluster." }, "nodeCount": { - "description": "The number of nodes in the Kubernetes Engine cluster that will be used to run this environment.", + "description": "The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "format": "int32", "type": "integer" }, @@ -620,7 +620,7 @@ }, "webServerNetworkAccessControl": { "$ref": "WebServerNetworkAccessControl", - "description": "Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied." + "description": "Optional. The network-level access control policy for the Airflow web server. If unspecified, no network-level access restrictions will be applied. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*." } }, "type": "object" @@ -630,23 +630,23 @@ "id": "IPAllocationPolicy", "properties": { "clusterIpv4CidrBlock": { - "description": "Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", + "description": "Optional. The IP address range used to allocate IP addresses to pods in the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", "type": "string" }, "clusterSecondaryRangeName": { - "description": "Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. This field is applicable only when `use_ip_aliases` is true.", + "description": "Optional. The name of the GKE cluster's secondary range used to allocate IP addresses to pods. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.", "type": "string" }, "servicesIpv4CidrBlock": { - "description": "Optional. The IP address range of the services IP addresses in this GKE cluster. 
This field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", + "description": "Optional. The IP address range of the services IP addresses in this GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", "type": "string" }, "servicesSecondaryRangeName": { - "description": "Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. This field is applicable only when `use_ip_aliases` is true.", + "description": "Optional. The name of the services' secondary range used to allocate IP addresses to the GKE cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.", "type": "string" }, "useIpAliases": { - "description": "Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created.", + "description": "Optional. Whether or not to enable Alias IPs in the GKE cluster. If `true`, a VPC-native cluster is created. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.", "type": "boolean" } }, @@ -745,7 +745,7 @@ "id": "NodeConfig", "properties": { "diskSizeGb": { - "description": "Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated.", + "description": "Optional. The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "format": "int32", "type": "integer" }, @@ -754,11 +754,11 @@ "description": "Optional. The configuration for controlling how IPs are allocated in the GKE cluster." }, "location": { - "description": "Optional. The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/zones/{zoneId}\". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field.", + "description": "Optional. 
The Compute Engine [zone](/compute/docs/regions-zones) in which to deploy the VMs used to run the Apache Airflow software, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/zones/{zoneId}\". This `location` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.machineType` are specified, `nodeConfig.machineType` must belong to this `location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If only one field (`location` or `nodeConfig.machineType`) is specified, the location information from the specified field will be propagated to the unspecified field. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "type": "string" }, "machineType": { - "description": "Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}\". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to \"n1-standard-1\".", + "description": "Optional. The Compute Engine [machine type](/compute/docs/machine-types) used for cluster instances, specified as a [relative resource name](/apis/design/resource_names#relative_resource_name). For example: \"projects/{projectId}/zones/{zoneId}/machineTypes/{machineTypeId}\". The `machineType` must belong to the enclosing environment's project and location. If both this field and `nodeConfig.location` are specified, this `machineType` must belong to the `nodeConfig.location`; if both are unspecified, the service will pick a zone in the Compute Engine region corresponding to the Cloud Composer location, and propagate that choice to both fields. If exactly one of this field and `nodeConfig.location` is specified, the location information from the specified field will be propagated to the unspecified field. The `machineTypeId` must not be a [shared-core machine type](/compute/docs/machine-types#sharedcore). If this field is unspecified, the `machineTypeId` defaults to \"n1-standard-1\". This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "type": "string" }, "network": { @@ -766,7 +766,7 @@ "type": "string" }, "oauthScopes": { - "description": "Optional. The set of Google API scopes to be made available on all node VMs. If `oauth_scopes` is empty, defaults to [\"https://www.googleapis.com/auth/cloud-platform\"]. Cannot be updated.", + "description": "Optional. The set of Google API scopes to be made available on all node VMs. 
If `oauth_scopes` is empty, defaults to [\"https://www.googleapis.com/auth/cloud-platform\"]. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "items": { "type": "string" }, @@ -781,7 +781,7 @@ "type": "string" }, "tags": { - "description": "Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated.", + "description": "Optional. The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "items": { "type": "string" }, @@ -917,7 +917,7 @@ "type": "string" }, "enablePrivateEnvironment": { - "description": "Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true.", + "description": "Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "type": "boolean" }, "privateClusterConfig": { @@ -925,11 +925,11 @@ "description": "Optional. Configuration for the private GKE cluster for a Private IP Cloud Composer environment." }, "webServerIpv4CidrBlock": { - "description": "Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`.", + "description": "Optional. The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from `private_cluster_config.master_ipv4_cidr_block` and `cloud_sql_ipv4_cidr_block`. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "type": "string" }, "webServerIpv4ReservedRange": { - "description": "Output only. The IP range reserved for the tenant project's App Engine VMs.", + "description": "Output only. The IP range reserved for the tenant project's App Engine VMs. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "readOnly": true, "type": "string" } @@ -966,7 +966,7 @@ "type": "object" }, "pythonVersion": { - "description": "Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated.", + "description": "Optional. The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '3'. Cannot be updated. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3.", "type": "string" }, "schedulerCount": { @@ -1005,7 +1005,7 @@ "type": "object" }, "WebServerConfig": { - "description": "The configuration settings for the Airflow web server App Engine instance.", + "description": "The configuration settings for the Airflow web server App Engine instance. 
Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "id": "WebServerConfig", "properties": { "machineType": { @@ -1016,7 +1016,7 @@ "type": "object" }, "WebServerNetworkAccessControl": { - "description": "Network-level access control policy for the Airflow web server.", + "description": "Network-level access control policy for the Airflow web server. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", "id": "WebServerNetworkAccessControl", "properties": { "allowedIpRanges": { diff --git a/googleapiclient/discovery_cache/documents/composer.v1beta1.json b/googleapiclient/discovery_cache/documents/composer.v1beta1.json index 68996e28318..7dc3c521cf8 100644 --- a/googleapiclient/discovery_cache/documents/composer.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/composer.v1beta1.json @@ -462,7 +462,7 @@ } } }, - "revision": "20210913", + "revision": "20210922", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AllowedIpRange": { @@ -727,19 +727,19 @@ "id": "IPAllocationPolicy", "properties": { "clusterIpv4CidrBlock": { - "description": "Optional. The IP address range used to allocate IP addresses to pods in the cluster. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.", + "description": "Optional. The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both.", "type": "string" }, "clusterSecondaryRangeName": { - "description": "Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.", + "description": "Optional. The name of the cluster's secondary range used to allocate IP addresses to pods. Specify either `cluster_secondary_range_name` or `cluster_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.", "type": "string" }, "servicesIpv4CidrBlock": { - "description": "Optional. The IP address range of the services IP addresses in this cluster. 
This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.", + "description": "Optional. The IP address range of the services IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. `/14`) to have GKE choose a range with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use. Specify `services_secondary_range_name` or `services_ipv4_cidr_block` but not both.", "type": "string" }, "servicesSecondaryRangeName": { - "description": "Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. This field is applicable only when `use_ip_aliases` is true. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.", + "description": "Optional. The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when `use_ip_aliases` is true.", "type": "string" }, "useIpAliases": { diff --git a/googleapiclient/discovery_cache/documents/compute.alpha.json b/googleapiclient/discovery_cache/documents/compute.alpha.json index 535837c1340..de25756ecf4 100644 --- a/googleapiclient/discovery_cache/documents/compute.alpha.json +++ b/googleapiclient/discovery_cache/documents/compute.alpha.json @@ -37874,7 +37874,7 @@ } } }, - "revision": "20210907", + "revision": "20210916", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { diff --git a/googleapiclient/discovery_cache/documents/compute.beta.json b/googleapiclient/discovery_cache/documents/compute.beta.json index 36bddff6e4e..1d03f83b621 100644 --- a/googleapiclient/discovery_cache/documents/compute.beta.json +++ b/googleapiclient/discovery_cache/documents/compute.beta.json @@ -32511,7 +32511,7 @@ } } }, - "revision": "20210907", + "revision": "20210916", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -35060,6 +35060,7 @@ "description": "Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. 
For more information, refer to Choosing a load balancer.", "enum": [ "EXTERNAL", + "EXTERNAL_MANAGED", "INTERNAL", "INTERNAL_MANAGED", "INTERNAL_SELF_MANAGED", @@ -35067,6 +35068,7 @@ ], "enumDescriptions": [ "Signifies that this will be used for external HTTP(S), SSL Proxy, TCP Proxy, or Network Load Balancing", + "Signifies that this will be used for External Managed HTTP(S), SSL Proxy, or TCP Proxy Load Balancing.", "Signifies that this will be used for Internal TCP/UDP Load Balancing.", "Signifies that this will be used for Internal HTTP(S) Load Balancing.", "Signifies that this will be used by Traffic Director.", @@ -39257,6 +39259,7 @@ "description": "Specifies the forwarding rule type. For more information about forwarding rules, refer to Forwarding rule concepts.", "enum": [ "EXTERNAL", + "EXTERNAL_MANAGED", "INTERNAL", "INTERNAL_MANAGED", "INTERNAL_SELF_MANAGED", @@ -39267,6 +39270,7 @@ "", "", "", + "", "" ], "type": "string" diff --git a/googleapiclient/discovery_cache/documents/compute.v1.json b/googleapiclient/discovery_cache/documents/compute.v1.json index 9537e91a7ea..37b868e0062 100644 --- a/googleapiclient/discovery_cache/documents/compute.v1.json +++ b/googleapiclient/discovery_cache/documents/compute.v1.json @@ -28278,7 +28278,7 @@ } } }, - "revision": "20210907", + "revision": "20210916", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { diff --git a/googleapiclient/discovery_cache/documents/container.v1.json b/googleapiclient/discovery_cache/documents/container.v1.json index 53065bd8a2d..2d2615ba7d8 100644 --- a/googleapiclient/discovery_cache/documents/container.v1.json +++ b/googleapiclient/discovery_cache/documents/container.v1.json @@ -2459,7 +2459,7 @@ } } }, - "revision": "20210902", + "revision": "20210910", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -4291,12 +4291,12 @@ "type": "boolean" }, "maxNodeCount": { - "description": "Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.", + "description": "Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.", "format": "int32", "type": "integer" }, "minNodeCount": { - "description": "Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.", + "description": "Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/container.v1beta1.json b/googleapiclient/discovery_cache/documents/container.v1beta1.json index f69239fde89..20854492a90 100644 --- a/googleapiclient/discovery_cache/documents/container.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/container.v1beta1.json @@ -2484,7 +2484,7 @@ } } }, - "revision": "20210902", + "revision": "20210906", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -4652,12 +4652,12 @@ "type": "boolean" }, "maxNodeCount": { - "description": "Maximum number of nodes in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.", + "description": "Maximum number of nodes for one location in the NodePool. Must be >= min_node_count. There has to be enough quota to scale up the cluster.", "format": "int32", "type": "integer" }, "minNodeCount": { - "description": "Minimum number of nodes in the NodePool. 
Must be >= 1 and <= max_node_count.", + "description": "Minimum number of nodes for one location in the NodePool. Must be >= 1 and <= max_node_count.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json index 2b69234002f..fe15c2a0bf0 100644 --- a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json @@ -1229,7 +1229,7 @@ } } }, - "revision": "20210913", + "revision": "20210917", "rootUrl": "https://containeranalysis.googleapis.com/", "schemas": { "Artifact": { diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json index 5945d060df6..b93cd9a678d 100644 --- a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json @@ -853,7 +853,7 @@ } } }, - "revision": "20210913", + "revision": "20210917", "rootUrl": "https://containeranalysis.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/content.v2.1.json b/googleapiclient/discovery_cache/documents/content.v2.1.json index 7b92f650dc8..67df490bae5 100644 --- a/googleapiclient/discovery_cache/documents/content.v2.1.json +++ b/googleapiclient/discovery_cache/documents/content.v2.1.json @@ -3067,7 +3067,7 @@ ] }, "captureOrder": { - "description": "Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processsor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped.", + "description": "Capture funds from the customer for the current order total. This method should be called after the merchant verifies that they are able and ready to start shipping the order. This method blocks until a response is received from the payment processor. If this method succeeds, the merchant is guaranteed to receive funds for the order after shipment. If the request fails, it can be retried or the order may be cancelled. This method cannot be called after the entire order is already shipped. A rejected error code is returned when the payment service provider has declined the charge. This indicates a problem between the PSP and either the merchant's or customer's account. Sometimes this error will be resolved by the customer. We recommend retrying these errors once per day or cancelling the order with reason `failedToCaptureFunds` if the items cannot be held.", "flatPath": "{merchantId}/orders/{orderId}/captureOrder", "httpMethod": "POST", "id": "content.orders.captureOrder", @@ -5783,7 +5783,7 @@ } } }, - "revision": "20210916", + "revision": "20210924", "rootUrl": "https://shoppingcontent.googleapis.com/", "schemas": { "Account": { @@ -5930,7 +5930,7 @@ "type": "string" }, "phoneNumber": { - "description": "The phone number of the business.", + "description": "The phone number of the business. This can only be updated if a verified phone number is not already set. To replace a verified phone number use 
the `Accounts.requestphoneverification` and `Accounts.verifyphonenumber` methods.", "type": "string" } }, @@ -6517,6 +6517,10 @@ "description": "Action to perform for this link. The `\"request\"` action is only available to select merchants. Acceptable values are: - \"`approve`\" - \"`remove`\" - \"`request`\" ", "type": "string" }, + "eCommercePlatformLinkInfo": { + "$ref": "ECommercePlatformLinkInfo", + "description": "Additional information required for `eCommercePlatform` link type." + }, "linkType": { "description": "Type of the link between the two accounts. Acceptable values are: - \"`channelPartner`\" - \"`eCommercePlatform`\" - \"`paymentServiceProvider`\" ", "type": "string" }, @@ -7893,6 +7897,17 @@ }, "type": "object" }, + "ECommercePlatformLinkInfo": { + "description": "Additional information required for E_COMMERCE_PLATFORM link type.", + "id": "ECommercePlatformLinkInfo", + "properties": { + "externalAccountId": { + "description": "The id used by the third party service provider to identify the merchant.", + "type": "string" + } + }, + "type": "object" + }, "Error": { "description": "An error returned by the API.", "id": "Error", @@ -8849,7 +8864,7 @@ "type": "string" }, "pickupMethod": { - "description": "Supported pickup method for this offer. Unless the value is \"not supported\", this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed // specification.", + "description": "Supported pickup method for this offer. Unless the value is \"not supported\", this field must be submitted together with `pickupSla`. For accepted attribute values, see the local product inventory feed specification.", "type": "string" }, "pickupSla": { @@ -9433,7 +9448,7 @@ "type": "integer" }, "reason": { - "description": "The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. Acceptable values are: - \"`autoPostInternal`\" - \"`autoPostInvalidBillingAddress`\" - \"`autoPostNoInventory`\" - \"`autoPostPriceError`\" - \"`autoPostUndeliverableShippingAddress`\" - \"`couponAbuse`\" - \"`customerCanceled`\" - \"`customerInitiatedCancel`\" - \"`customerSupportRequested`\" - \"`failToPushOrderGoogleError`\" - \"`failToPushOrderMerchantError`\" - \"`failToPushOrderMerchantFulfillmentError`\" - \"`failToPushOrderToMerchant`\" - \"`failToPushOrderToMerchantOutOfStock`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`merchantDidNotShipOnTime`\" - \"`noInventory`\" - \"`orderTimeout`\" - \"`other`\" - \"`paymentAbuse`\" - \"`paymentDeclined`\" - \"`priceError`\" - \"`returnRefundAbuse`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" ", + "description": "The reason for the cancellation. Orders that are canceled with a noInventory reason will lead to the removal of the product from Buy on Google until you make an update to that product. This will not affect your Shopping ads. 
Acceptable values are: - \"`autoPostInternal`\" - \"`autoPostInvalidBillingAddress`\" - \"`autoPostNoInventory`\" - \"`autoPostPriceError`\" - \"`autoPostUndeliverableShippingAddress`\" - \"`couponAbuse`\" - \"`customerCanceled`\" - \"`customerInitiatedCancel`\" - \"`customerSupportRequested`\" - \"`failToPushOrderGoogleError`\" - \"`failToPushOrderMerchantError`\" - \"`failToPushOrderMerchantFulfillmentError`\" - \"`failToPushOrderToMerchant`\" - \"`failToPushOrderToMerchantOutOfStock`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`merchantDidNotShipOnTime`\" - \"`noInventory`\" - \"`orderTimeout`\" - \"`other`\" - \"`paymentAbuse`\" - \"`paymentDeclined`\" - \"`priceError`\" - \"`returnRefundAbuse`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" - \"`failedToCaptureFunds`\" ", "type": "string" }, "reasonText": { @@ -10745,7 +10760,7 @@ "type": "integer" }, "reason": { - "description": "The reason for the cancellation. Acceptable values are: - \"`customerInitiatedCancel`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`noInventory`\" - \"`other`\" - \"`priceError`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" ", + "description": "The reason for the cancellation. Acceptable values are: - \"`customerInitiatedCancel`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`noInventory`\" - \"`other`\" - \"`priceError`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" - \"`failedToCaptureFunds`\" ", "type": "string" }, "reasonText": { @@ -10777,7 +10792,7 @@ "type": "string" }, "reason": { - "description": "The reason for the cancellation. Acceptable values are: - \"`customerInitiatedCancel`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`noInventory`\" - \"`other`\" - \"`priceError`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" ", + "description": "The reason for the cancellation. 
Acceptable values are: - \"`customerInitiatedCancel`\" - \"`invalidCoupon`\" - \"`malformedShippingAddress`\" - \"`noInventory`\" - \"`other`\" - \"`priceError`\" - \"`shippingPriceError`\" - \"`taxError`\" - \"`undeliverableShippingAddress`\" - \"`unsupportedPoBoxAddress`\" - \"`failedToCaptureFunds`\" ", "type": "string" }, "reasonText": { @@ -13184,6 +13199,20 @@ ], "type": "string" }, + "productType": { + "description": "Product filter by product type for the promotion.", + "items": { + "type": "string" + }, + "type": "array" + }, + "productTypeExclusion": { + "description": "Product filter by product type exclusion for the promotion.", + "items": { + "type": "string" + }, + "type": "array" + }, "promotionDestinationIds": { "description": "Destination ID for the promotion.", "items": { diff --git a/googleapiclient/discovery_cache/documents/content.v2.json b/googleapiclient/discovery_cache/documents/content.v2.json index 3f28ec9568c..e504b7e60d2 100644 --- a/googleapiclient/discovery_cache/documents/content.v2.json +++ b/googleapiclient/discovery_cache/documents/content.v2.json @@ -3298,7 +3298,7 @@ } } }, - "revision": "20210916", + "revision": "20210924", "rootUrl": "https://shoppingcontent.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/customsearch.v1.json b/googleapiclient/discovery_cache/documents/customsearch.v1.json index 46f1c5082f4..b23d3342952 100644 --- a/googleapiclient/discovery_cache/documents/customsearch.v1.json +++ b/googleapiclient/discovery_cache/documents/customsearch.v1.json @@ -674,7 +674,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://customsearch.googleapis.com/", "schemas": { "Promotion": { diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1.json index 14d3cdeb39d..81612f58087 100644 --- a/googleapiclient/discovery_cache/documents/datacatalog.v1.json +++ b/googleapiclient/discovery_cache/documents/datacatalog.v1.json @@ -1841,7 +1841,7 @@ } } }, - "revision": "20210909", + "revision": "20210910", "rootUrl": "https://datacatalog.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json index 267603efa6b..ecb49ebed5b 100644 --- a/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json @@ -1808,7 +1808,7 @@ } } }, - "revision": "20210909", + "revision": "20210910", "rootUrl": "https://datacatalog.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json b/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json index 363c85a7cd9..2476acea9ab 100644 --- a/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datalabeling.v1beta1.json @@ -1596,7 +1596,7 @@ } } }, - "revision": "20210907", + "revision": "20210916", "rootUrl": "https://datalabeling.googleapis.com/", "schemas": { "GoogleCloudDatalabelingV1alpha1CreateInstructionMetadata": { diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1.json b/googleapiclient/discovery_cache/documents/datamigration.v1.json index 5f9bb257e56..82c1530093e 100644 --- a/googleapiclient/discovery_cache/documents/datamigration.v1.json +++ 
b/googleapiclient/discovery_cache/documents/datamigration.v1.json @@ -1049,7 +1049,7 @@ } } }, - "revision": "20210907", + "revision": "20210915", "rootUrl": "https://datamigration.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json index 2d00e1ac898..4706561a4af 100644 --- a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json @@ -1049,7 +1049,7 @@ } } }, - "revision": "20210907", + "revision": "20210915", "rootUrl": "https://datamigration.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/dataproc.v1.json b/googleapiclient/discovery_cache/documents/dataproc.v1.json index 2e607b4c453..88808bab2d6 100644 --- a/googleapiclient/discovery_cache/documents/dataproc.v1.json +++ b/googleapiclient/discovery_cache/documents/dataproc.v1.json @@ -2316,7 +2316,7 @@ } } }, - "revision": "20210909", + "revision": "20210920", "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { diff --git a/googleapiclient/discovery_cache/documents/datastore.v1.json b/googleapiclient/discovery_cache/documents/datastore.v1.json index 281fe6d280e..faa7b42b323 100644 --- a/googleapiclient/discovery_cache/documents/datastore.v1.json +++ b/googleapiclient/discovery_cache/documents/datastore.v1.json @@ -626,7 +626,7 @@ } } }, - "revision": "20210901", + "revision": "20210916", "rootUrl": "https://datastore.googleapis.com/", "schemas": { "AllocateIdsRequest": { diff --git a/googleapiclient/discovery_cache/documents/datastore.v1beta1.json b/googleapiclient/discovery_cache/documents/datastore.v1beta1.json index c699ef2f618..dea83d4d5ff 100644 --- a/googleapiclient/discovery_cache/documents/datastore.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datastore.v1beta1.json @@ -168,7 +168,7 @@ } } }, - "revision": "20210901", + "revision": "20210916", "rootUrl": "https://datastore.googleapis.com/", "schemas": { "GoogleDatastoreAdminV1CommonMetadata": { diff --git a/googleapiclient/discovery_cache/documents/datastore.v1beta3.json b/googleapiclient/discovery_cache/documents/datastore.v1beta3.json index b417a07d954..05a2049c82d 100644 --- a/googleapiclient/discovery_cache/documents/datastore.v1beta3.json +++ b/googleapiclient/discovery_cache/documents/datastore.v1beta3.json @@ -308,7 +308,7 @@ } } }, - "revision": "20210901", + "revision": "20210916", "rootUrl": "https://datastore.googleapis.com/", "schemas": { "AllocateIdsRequest": { diff --git a/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json b/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json index 66b183615d7..18a67643765 100644 --- a/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json +++ b/googleapiclient/discovery_cache/documents/deploymentmanager.alpha.json @@ -1588,7 +1588,7 @@ } } }, - "revision": "20210909", + "revision": "20210923", "rootUrl": "https://deploymentmanager.googleapis.com/", "schemas": { "AsyncOptions": { diff --git a/googleapiclient/discovery_cache/documents/deploymentmanager.v2.json b/googleapiclient/discovery_cache/documents/deploymentmanager.v2.json index c4104ed8353..359808c76e2 100644 --- a/googleapiclient/discovery_cache/documents/deploymentmanager.v2.json +++ b/googleapiclient/discovery_cache/documents/deploymentmanager.v2.json @@ -988,7 +988,7 @@ } } }, - 
"revision": "20210909", + "revision": "20210923", "rootUrl": "https://deploymentmanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/deploymentmanager.v2beta.json b/googleapiclient/discovery_cache/documents/deploymentmanager.v2beta.json index 3649648e5d1..4b193f1f125 100644 --- a/googleapiclient/discovery_cache/documents/deploymentmanager.v2beta.json +++ b/googleapiclient/discovery_cache/documents/deploymentmanager.v2beta.json @@ -1552,7 +1552,7 @@ } } }, - "revision": "20210909", + "revision": "20210923", "rootUrl": "https://deploymentmanager.googleapis.com/", "schemas": { "AsyncOptions": { diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v2.json b/googleapiclient/discovery_cache/documents/dialogflow.v2.json index 87a96c55c13..5924ea87713 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v2.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v2.json @@ -6983,7 +6983,7 @@ } } }, - "revision": "20210909", + "revision": "20210918", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AudioInput": { @@ -7192,6 +7192,35 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3DeployFlowMetadata": { + "description": "Metadata returned for the Environments.DeployFlow long running operation.", + "id": "GoogleCloudDialogflowCxV3DeployFlowMetadata", + "properties": { + "testErrors": { + "description": "Errors of running deployment tests.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3TestError" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3DeployFlowResponse": { + "description": "The response message for Environments.DeployFlow.", + "id": "GoogleCloudDialogflowCxV3DeployFlowResponse", + "properties": { + "deployment": { + "description": "The name of the flow version Deployment. Format: `projects//locations//agents// environments//deployments/`.", + "type": "string" + }, + "environment": { + "$ref": "GoogleCloudDialogflowCxV3Environment", + "description": "The updated environment where the flow is deployed." + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3DtmfInput": { "description": "Represents the input for dtmf event.", "id": "GoogleCloudDialogflowCxV3DtmfInput", @@ -7207,6 +7236,75 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3Environment": { + "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.", + "id": "GoogleCloudDialogflowCxV3Environment", + "properties": { + "description": { + "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.", + "type": "string" + }, + "displayName": { + "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.", + "type": "string" + }, + "name": { + "description": "The name of the environment. 
Format: `projects//locations//agents//environments/`.", + "type": "string" + }, + "testCasesConfig": { + "$ref": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig", + "description": "The test cases config for continuous tests of this environment." + }, + "updateTime": { + "description": "Output only. Update time of this environment.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "versionConfigs": { + "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig": { + "description": "The configuration for continuous tests.", + "id": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig", + "properties": { + "enableContinuousRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day.", + "type": "boolean" + }, + "enablePredeploymentRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.", + "type": "boolean" + }, + "testCases": { + "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/`", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3EnvironmentVersionConfig": { + "description": "Configuration for the version.", + "id": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig", + "properties": { + "version": { + "description": "Required. Format: projects//locations//agents//flows//versions/.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3EventHandler": { "description": "An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.", "id": "GoogleCloudDialogflowCxV3EventHandler", @@ -8763,6 +8861,35 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata": { + "description": "Metadata returned for the Environments.DeployFlow long running operation.", + "id": "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata", + "properties": { + "testErrors": { + "description": "Errors of running deployment tests.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3beta1TestError" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1DeployFlowResponse": { + "description": "The response message for Environments.DeployFlow.", + "id": "GoogleCloudDialogflowCxV3beta1DeployFlowResponse", + "properties": { + "deployment": { + "description": "The name of the flow version deployment. Format: `projects//locations//agents// environments//deployments/`.", + "type": "string" + }, + "environment": { + "$ref": "GoogleCloudDialogflowCxV3beta1Environment", + "description": "The updated environment where the flow is deployed." 
+ } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3beta1DtmfInput": { "description": "Represents the input for dtmf event.", "id": "GoogleCloudDialogflowCxV3beta1DtmfInput", @@ -8778,6 +8905,75 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3beta1Environment": { + "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.", + "id": "GoogleCloudDialogflowCxV3beta1Environment", + "properties": { + "description": { + "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.", + "type": "string" + }, + "displayName": { + "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.", + "type": "string" + }, + "name": { + "description": "The name of the environment. Format: `projects//locations//agents//environments/`.", + "type": "string" + }, + "testCasesConfig": { + "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig", + "description": "The test cases config for continuous tests of this environment." + }, + "updateTime": { + "description": "Output only. Update time of this environment.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "versionConfigs": { + "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig": { + "description": "The configuration for continuous tests.", + "id": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig", + "properties": { + "enableContinuousRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.", + "type": "boolean" + }, + "enablePredeploymentRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.", + "type": "boolean" + }, + "testCases": { + "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/`", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig": { + "description": "Configuration for the version.", + "id": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig", + "properties": { + "version": { + "description": "Required. Format: projects//locations//agents//flows//versions/.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3beta1EventHandler": { "description": "An event handler specifies an event that can be handled during a session. 
When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.", "id": "GoogleCloudDialogflowCxV3beta1EventHandler", diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json b/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json index 9e1b41dc3fe..7bc18fd1652 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json @@ -7315,7 +7315,7 @@ } } }, - "revision": "20210909", + "revision": "20210918", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AudioInput": { @@ -7524,6 +7524,35 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3DeployFlowMetadata": { + "description": "Metadata returned for the Environments.DeployFlow long running operation.", + "id": "GoogleCloudDialogflowCxV3DeployFlowMetadata", + "properties": { + "testErrors": { + "description": "Errors of running deployment tests.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3TestError" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3DeployFlowResponse": { + "description": "The response message for Environments.DeployFlow.", + "id": "GoogleCloudDialogflowCxV3DeployFlowResponse", + "properties": { + "deployment": { + "description": "The name of the flow version Deployment. Format: `projects//locations//agents// environments//deployments/`.", + "type": "string" + }, + "environment": { + "$ref": "GoogleCloudDialogflowCxV3Environment", + "description": "The updated environment where the flow is deployed." + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3DtmfInput": { "description": "Represents the input for dtmf event.", "id": "GoogleCloudDialogflowCxV3DtmfInput", @@ -7539,6 +7568,75 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3Environment": { + "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.", + "id": "GoogleCloudDialogflowCxV3Environment", + "properties": { + "description": { + "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.", + "type": "string" + }, + "displayName": { + "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.", + "type": "string" + }, + "name": { + "description": "The name of the environment. Format: `projects//locations//agents//environments/`.", + "type": "string" + }, + "testCasesConfig": { + "$ref": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig", + "description": "The test cases config for continuous tests of this environment." + }, + "updateTime": { + "description": "Output only. 
Update time of this environment.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "versionConfigs": { + "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig": { + "description": "The configuration for continuous tests.", + "id": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig", + "properties": { + "enableContinuousRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day.", + "type": "boolean" + }, + "enablePredeploymentRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.", + "type": "boolean" + }, + "testCases": { + "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/`", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3EnvironmentVersionConfig": { + "description": "Configuration for the version.", + "id": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig", + "properties": { + "version": { + "description": "Required. Format: projects//locations//agents//flows//versions/.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3EventHandler": { "description": "An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.", "id": "GoogleCloudDialogflowCxV3EventHandler", @@ -9095,6 +9193,35 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata": { + "description": "Metadata returned for the Environments.DeployFlow long running operation.", + "id": "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata", + "properties": { + "testErrors": { + "description": "Errors of running deployment tests.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3beta1TestError" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1DeployFlowResponse": { + "description": "The response message for Environments.DeployFlow.", + "id": "GoogleCloudDialogflowCxV3beta1DeployFlowResponse", + "properties": { + "deployment": { + "description": "The name of the flow version deployment. Format: `projects//locations//agents// environments//deployments/`.", + "type": "string" + }, + "environment": { + "$ref": "GoogleCloudDialogflowCxV3beta1Environment", + "description": "The updated environment where the flow is deployed." 
+ } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3beta1DtmfInput": { "description": "Represents the input for dtmf event.", "id": "GoogleCloudDialogflowCxV3beta1DtmfInput", @@ -9110,6 +9237,75 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3beta1Environment": { + "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.", + "id": "GoogleCloudDialogflowCxV3beta1Environment", + "properties": { + "description": { + "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.", + "type": "string" + }, + "displayName": { + "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.", + "type": "string" + }, + "name": { + "description": "The name of the environment. Format: `projects//locations//agents//environments/`.", + "type": "string" + }, + "testCasesConfig": { + "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig", + "description": "The test cases config for continuous tests of this environment." + }, + "updateTime": { + "description": "Output only. Update time of this environment.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "versionConfigs": { + "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig": { + "description": "The configuration for continuous tests.", + "id": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig", + "properties": { + "enableContinuousRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.", + "type": "boolean" + }, + "enablePredeploymentRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.", + "type": "boolean" + }, + "testCases": { + "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/`", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig": { + "description": "Configuration for the version.", + "id": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig", + "properties": { + "version": { + "description": "Required. Format: projects//locations//agents//flows//versions/.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3beta1EventHandler": { "description": "An event handler specifies an event that can be handled during a session. 
When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.", "id": "GoogleCloudDialogflowCxV3beta1EventHandler", @@ -15987,6 +16183,10 @@ "description": "Returns a response containing a custom, platform-specific payload.", "type": "object" }, + "telephonyTransferCall": { + "$ref": "GoogleCloudDialogflowV2beta1ResponseMessageTelephonyTransferCall", + "description": "A signal that the client should transfer the phone call connected to this agent to a third-party endpoint." + }, "text": { "$ref": "GoogleCloudDialogflowV2beta1ResponseMessageText", "description": "Returns a text response." @@ -16015,6 +16215,21 @@ }, "type": "object" }, + "GoogleCloudDialogflowV2beta1ResponseMessageTelephonyTransferCall": { + "description": "Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint.", + "id": "GoogleCloudDialogflowV2beta1ResponseMessageTelephonyTransferCall", + "properties": { + "phoneNumber": { + "description": "Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).", + "type": "string" + }, + "sipUri": { + "description": "Transfer the call to a SIP endpoint.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowV2beta1ResponseMessageText": { "description": "The text response message.", "id": "GoogleCloudDialogflowV2beta1ResponseMessageText", diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v3.json b/googleapiclient/discovery_cache/documents/dialogflow.v3.json index 740654b1b5e..2686ea1116a 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v3.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v3.json @@ -696,6 +696,35 @@ "https://www.googleapis.com/auth/dialogflow" ] }, + "deployFlow": { + "description": "Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse", + "flatPath": "v3/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}:deployFlow", + "httpMethod": "POST", + "id": "dialogflow.projects.locations.agents.environments.deployFlow", + "parameterOrder": [ + "environment" + ], + "parameters": { + "environment": { + "description": "Required. The environment to deploy the flow to. 
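The discovery entry above only describes the wire format; in the generated Python client the new method surfaces as `environments().deployFlow(...)`. A minimal, untested sketch, with placeholder resource names, and the standard `operations.get` accessor assumed for polling:

```python
# Hedged sketch: deploy a flow version and wait for the long-running operation.
import time

from googleapiclient.discovery import build

service = build("dialogflow", "v3")

environment = ("projects/my-project/locations/global/agents/my-agent/"
               "environments/my-env")
flow_version = ("projects/my-project/locations/global/agents/my-agent/"
                "flows/my-flow/versions/1")

op = service.projects().locations().agents().environments().deployFlow(
    environment=environment,
    body={"flowVersion": flow_version},  # GoogleCloudDialogflowCxV3DeployFlowRequest
).execute()

# Poll the operation; its metadata is DeployFlowMetadata and its response
# is DeployFlowResponse, per the schemas in this diff.
while not op.get("done"):
    time.sleep(5)
    op = service.projects().locations().operations().get(name=op["name"]).execute()

if "error" in op:
    raise RuntimeError(op["error"])
print(op["response"].get("deployment"))  # projects/.../environments/.../deployments/...
```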
"get": { "description": "Retrieves the specified Environment.", "flatPath": "v3/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}", @@ -903,6 +932,73 @@ } } }, + "deployments": { + "methods": { + "get": { + "description": "Retrieves the specified Deployment.", + "flatPath": "v3/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}/deployments/{deploymentsId}", + "httpMethod": "GET", + "id": "dialogflow.projects.locations.agents.environments.deployments.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the Deployment. Format: `projects//locations//agents//environments//deployments/`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+/deployments/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v3/{+name}", + "response": { + "$ref": "GoogleCloudDialogflowCxV3Deployment" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/dialogflow" + ] + }, + "list": { + "description": "Returns the list of all deployments in the specified Environment.", + "flatPath": "v3/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}/deployments", + "httpMethod": "GET", + "id": "dialogflow.projects.locations.agents.environments.deployments.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return in a single page. By default 20 and at most 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The Environment to list all deployments for. Format: `projects//locations//agents//environments/`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v3/{+parent}/deployments", + "response": { + "$ref": "GoogleCloudDialogflowCxV3ListDeploymentsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/dialogflow" + ] + } + } + }, "experiments": { "methods": { "create": { @@ -3493,7 +3589,7 @@ ], "parameters": { "name": { - "description": "Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.", + "description": "Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/securitySettings/[^/]+$", "required": true, @@ -3623,7 +3719,7 @@ } } }, - "revision": "20210909", + "revision": "20210918", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { @@ -3993,6 +4089,109 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3DeployFlowMetadata": { + "description": "Metadata returned for the Environments.DeployFlow long running operation.", + "id": "GoogleCloudDialogflowCxV3DeployFlowMetadata", + "properties": { + "testErrors": { + "description": "Errors of running deployment tests.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3TestError" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3DeployFlowRequest": { + "description": "The request message for Environments.DeployFlow.", + "id": "GoogleCloudDialogflowCxV3DeployFlowRequest", + "properties": { + "flowVersion": { + "description": "Required. The flow version to deploy. Format: `projects//locations//agents// flows//versions/`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3DeployFlowResponse": { + "description": "The response message for Environments.DeployFlow.", + "id": "GoogleCloudDialogflowCxV3DeployFlowResponse", + "properties": { + "deployment": { + "description": "The name of the flow version Deployment. Format: `projects//locations//agents// environments//deployments/`.", + "type": "string" + }, + "environment": { + "$ref": "GoogleCloudDialogflowCxV3Environment", + "description": "The updated environment where the flow is deployed." + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3Deployment": { + "description": "Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure running pre-deployment steps, e.g. running validation test cases, experiment auto-rollout, etc.", + "id": "GoogleCloudDialogflowCxV3Deployment", + "properties": { + "endTime": { + "description": "End time of this deployment.", + "format": "google-datetime", + "type": "string" + }, + "flowVersion": { + "description": "The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.", + "type": "string" + }, + "name": { + "description": "The name of the deployment. Format: projects//locations//agents//environments//deployments/.", + "type": "string" + }, + "result": { + "$ref": "GoogleCloudDialogflowCxV3DeploymentResult", + "description": "Result of the deployment." + }, + "startTime": { + "description": "Start time of this deployment.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "The current state of the deployment.", + "enum": [ + "STATE_UNSPECIFIED", + "RUNNING", + "SUCCEEDED", + "FAILED" + ], + "enumDescriptions": [ + "State unspecified.", + "The deployment is running.", + "The deployment succeeded.", + "The deployment failed." + ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3DeploymentResult": { + "description": "Result of the deployment.", + "id": "GoogleCloudDialogflowCxV3DeploymentResult", + "properties": { + "deploymentTestResults": { + "description": "Results of test cases running before the deployment. Format: `projects//locations//agents//testCases//results/`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "experiment": { + "description": "The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3DetectIntentRequest": { "description": "The request to detect user's intent.", "id": "GoogleCloudDialogflowCxV3DetectIntentRequest", @@ -4179,6 +4378,10 @@ "description": "The name of the environment. Format: `projects//locations//agents//environments/`.", "type": "string" }, + "testCasesConfig": { + "$ref": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig", + "description": "The test cases config for continuous tests of this environment." + }, "updateTime": { "description": "Output only. Update time of this environment.", "format": "google-datetime", @@ -4195,6 +4398,28 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig": { + "description": "The configuration for continuous tests.", + "id": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig", + "properties": { + "enableContinuousRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.", + "type": "boolean" + }, + "enablePredeploymentRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.", + "type": "boolean" + }, + "testCases": { + "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/`", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3EnvironmentVersionConfig": { "description": "Configuration for the version.", "id": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig", @@ -5263,6 +5488,24 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3ListDeploymentsResponse": { + "description": "The response message for Deployments.ListDeployments.", + "id": "GoogleCloudDialogflowCxV3ListDeploymentsResponse", + "properties": { + "deployments": { + "description": "The list of deployments. There will be a maximum number of items returned based on the page_size field in the request. The list may in some cases be empty or contain fewer entries than page_size even if this isn't the last page.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3Deployment" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3ListEntityTypesResponse": { "description": "The response message for EntityTypes.ListEntityTypes.", "id": "GoogleCloudDialogflowCxV3ListEntityTypesResponse", @@ -6372,7 +6615,7 @@ "type": "string" }, "name": { - "description": "Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.", + "description": "Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically.
Format: `projects//locations//securitySettings/`.", "type": "string" }, "purgeDataTypes": { @@ -7632,6 +7875,35 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata": { + "description": "Metadata returned for the Environments.DeployFlow long running operation.", + "id": "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata", + "properties": { + "testErrors": { + "description": "Errors of running deployment tests.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3beta1TestError" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1DeployFlowResponse": { + "description": "The response message for Environments.DeployFlow.", + "id": "GoogleCloudDialogflowCxV3beta1DeployFlowResponse", + "properties": { + "deployment": { + "description": "The name of the flow version deployment. Format: `projects//locations//agents// environments//deployments/`.", + "type": "string" + }, + "environment": { + "$ref": "GoogleCloudDialogflowCxV3beta1Environment", + "description": "The updated environment where the flow is deployed." + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3beta1DtmfInput": { "description": "Represents the input for dtmf event.", "id": "GoogleCloudDialogflowCxV3beta1DtmfInput", @@ -7647,6 +7919,75 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3beta1Environment": { + "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.", + "id": "GoogleCloudDialogflowCxV3beta1Environment", + "properties": { + "description": { + "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.", + "type": "string" + }, + "displayName": { + "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.", + "type": "string" + }, + "name": { + "description": "The name of the environment. Format: `projects//locations//agents//environments/`.", + "type": "string" + }, + "testCasesConfig": { + "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig", + "description": "The test cases config for continuous tests of this environment." + }, + "updateTime": { + "description": "Output only. Update time of this environment.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "versionConfigs": { + "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. Otherwise, an error will be returned.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig": { + "description": "The configuration for continuous tests.", + "id": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig", + "properties": { + "enableContinuousRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. 
If set to ture, run once a day.", + "type": "boolean" + }, + "enablePredeploymentRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.", + "type": "boolean" + }, + "testCases": { + "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/`", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig": { + "description": "Configuration for the version.", + "id": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig", + "properties": { + "version": { + "description": "Required. Format: projects//locations//agents//flows//versions/.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3beta1EventHandler": { "description": "An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.", "id": "GoogleCloudDialogflowCxV3beta1EventHandler", diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json b/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json index e259f42254e..232349a36f6 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json @@ -696,6 +696,35 @@ "https://www.googleapis.com/auth/dialogflow" ] }, + "deployFlow": { + "description": "Deploys a flow to the specified Environment. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: DeployFlowMetadata - `response`: DeployFlowResponse", + "flatPath": "v3beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}:deployFlow", + "httpMethod": "POST", + "id": "dialogflow.projects.locations.agents.environments.deployFlow", + "parameterOrder": [ + "environment" + ], + "parameters": { + "environment": { + "description": "Required. The environment to deploy the flow to. 
Format: `projects//locations//agents// environments/`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v3beta1/{+environment}:deployFlow", + "request": { + "$ref": "GoogleCloudDialogflowCxV3beta1DeployFlowRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/dialogflow" + ] + }, "get": { "description": "Retrieves the specified Environment.", "flatPath": "v3beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}", @@ -903,6 +932,73 @@ } } }, + "deployments": { + "methods": { + "get": { + "description": "Retrieves the specified Deployment.", + "flatPath": "v3beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}/deployments/{deploymentsId}", + "httpMethod": "GET", + "id": "dialogflow.projects.locations.agents.environments.deployments.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the Deployment. Format: `projects//locations//agents//environments//deployments/`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+/deployments/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v3beta1/{+name}", + "response": { + "$ref": "GoogleCloudDialogflowCxV3beta1Deployment" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/dialogflow" + ] + }, + "list": { + "description": "Returns the list of all deployments in the specified Environment.", + "flatPath": "v3beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/environments/{environmentsId}/deployments", + "httpMethod": "GET", + "id": "dialogflow.projects.locations.agents.environments.deployments.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of items to return in a single page. By default 20 and at most 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The Environment to list all environments for. Format: `projects//locations//agents//environments/`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/environments/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v3beta1/{+parent}/deployments", + "response": { + "$ref": "GoogleCloudDialogflowCxV3beta1ListDeploymentsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/dialogflow" + ] + } + } + }, "experiments": { "methods": { "create": { @@ -3493,7 +3589,7 @@ ], "parameters": { "name": { - "description": "Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.", + "description": "Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. 
Format: `projects//locations//securitySettings/`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/securitySettings/[^/]+$", "required": true, @@ -3623,7 +3719,7 @@ } } }, - "revision": "20210909", + "revision": "20210918", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AudioInput": { @@ -3832,6 +3928,35 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3DeployFlowMetadata": { + "description": "Metadata returned for the Environments.DeployFlow long running operation.", + "id": "GoogleCloudDialogflowCxV3DeployFlowMetadata", + "properties": { + "testErrors": { + "description": "Errors of running deployment tests.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3TestError" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3DeployFlowResponse": { + "description": "The response message for Environments.DeployFlow.", + "id": "GoogleCloudDialogflowCxV3DeployFlowResponse", + "properties": { + "deployment": { + "description": "The name of the flow version Deployment. Format: `projects//locations//agents// environments//deployments/`.", + "type": "string" + }, + "environment": { + "$ref": "GoogleCloudDialogflowCxV3Environment", + "description": "The updated environment where the flow is deployed." + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3DtmfInput": { "description": "Represents the input for dtmf event.", "id": "GoogleCloudDialogflowCxV3DtmfInput", @@ -3847,6 +3972,75 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3Environment": { + "description": "Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc.", + "id": "GoogleCloudDialogflowCxV3Environment", + "properties": { + "description": { + "description": "The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.", + "type": "string" + }, + "displayName": { + "description": "Required. The human-readable name of the environment (unique in an agent). Limit of 64 characters.", + "type": "string" + }, + "name": { + "description": "The name of the environment. Format: `projects//locations//agents//environments/`.", + "type": "string" + }, + "testCasesConfig": { + "$ref": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig", + "description": "The test cases config for continuous tests of this environment." + }, + "updateTime": { + "description": "Output only. Update time of this environment.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "versionConfigs": { + "description": "Required. A list of configurations for flow versions. You should include version configs for all flows that are reachable from `Start Flow` in the agent. 
Otherwise, an error will be returned.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig": { + "description": "The configuration for continuous tests.", + "id": "GoogleCloudDialogflowCxV3EnvironmentTestCasesConfig", + "properties": { + "enableContinuousRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to ture, run once a day.", + "type": "boolean" + }, + "enablePredeploymentRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.", + "type": "boolean" + }, + "testCases": { + "description": "A list of test case names to run. They should be under the same agent. Format of each test case name: `projects//locations/ /agents//testCases/`", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3EnvironmentVersionConfig": { + "description": "Configuration for the version.", + "id": "GoogleCloudDialogflowCxV3EnvironmentVersionConfig", + "properties": { + "version": { + "description": "Required. Format: projects//locations//agents//flows//versions/.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3EventHandler": { "description": "An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.", "id": "GoogleCloudDialogflowCxV3EventHandler", @@ -5564,6 +5758,109 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata": { + "description": "Metadata returned for the Environments.DeployFlow long running operation.", + "id": "GoogleCloudDialogflowCxV3beta1DeployFlowMetadata", + "properties": { + "testErrors": { + "description": "Errors of running deployment tests.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3beta1TestError" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1DeployFlowRequest": { + "description": "The request message for Environments.DeployFlow.", + "id": "GoogleCloudDialogflowCxV3beta1DeployFlowRequest", + "properties": { + "flowVersion": { + "description": "Required. The flow version to deploy. Format: `projects//locations//agents// flows//versions/`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1DeployFlowResponse": { + "description": "The response message for Environments.DeployFlow.", + "id": "GoogleCloudDialogflowCxV3beta1DeployFlowResponse", + "properties": { + "deployment": { + "description": "The name of the flow version deployment. Format: `projects//locations//agents// environments//deployments/`.", + "type": "string" + }, + "environment": { + "$ref": "GoogleCloudDialogflowCxV3beta1Environment", + "description": "The updated environment where the flow is deployed." + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1Deployment": { + "description": "Represents an deployment in an environment. A deployment happens when a flow version configured to be active in the environment. 
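The `deployments.list` method added earlier in this file pages like any other List method (default page size 20, maximum 100). A small, untested sketch of draining the collection and inspecting each deployment's state; resource names are placeholders:

```python
# Hedged sketch: list all deployments in an environment, following page tokens.
from googleapiclient.discovery import build

service = build("dialogflow", "v3beta1")

parent = ("projects/my-project/locations/global/agents/my-agent/"
          "environments/my-env")

deployments = []
kwargs = {"parent": parent, "pageSize": 100}
while True:
    resp = (service.projects().locations().agents().environments()
            .deployments().list(**kwargs).execute())
    deployments.extend(resp.get("deployments", []))
    token = resp.get("nextPageToken")
    if not token:  # empty token means no more results
        break
    kwargs["pageToken"] = token

for d in deployments:
    # state is one of STATE_UNSPECIFIED, RUNNING, SUCCEEDED, FAILED.
    print(d["name"], d.get("state"))
```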
+ "GoogleCloudDialogflowCxV3beta1Deployment": { + "description": "Represents a deployment in an environment. A deployment happens when a flow version is configured to be active in the environment. You can configure running pre-deployment steps, e.g. running validation test cases, experiment auto-rollout, etc.", + "id": "GoogleCloudDialogflowCxV3beta1Deployment", + "properties": { + "endTime": { + "description": "End time of this deployment.", + "format": "google-datetime", + "type": "string" + }, + "flowVersion": { + "description": "The name of the flow version for this deployment. Format: projects//locations//agents//flows//versions/.", + "type": "string" + }, + "name": { + "description": "The name of the deployment. Format: projects//locations//agents//environments//deployments/.", + "type": "string" + }, + "result": { + "$ref": "GoogleCloudDialogflowCxV3beta1DeploymentResult", + "description": "Result of the deployment." + }, + "startTime": { + "description": "Start time of this deployment.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "The current state of the deployment.", + "enum": [ + "STATE_UNSPECIFIED", + "RUNNING", + "SUCCEEDED", + "FAILED" + ], + "enumDescriptions": [ + "State unspecified.", + "The deployment is running.", + "The deployment succeeded.", + "The deployment failed." + ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowCxV3beta1DeploymentResult": { + "description": "Result of the deployment.", + "id": "GoogleCloudDialogflowCxV3beta1DeploymentResult", + "properties": { + "deploymentTestResults": { + "description": "Results of test cases running before the deployment. Format: `projects//locations//agents//testCases//results/`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "experiment": { + "description": "The name of the experiment triggered by this deployment. Format: projects//locations//agents//environments//experiments/.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3beta1DetectIntentRequest": { "description": "The request to detect user's intent.", "id": "GoogleCloudDialogflowCxV3beta1DetectIntentRequest", @@ -5750,6 +6047,10 @@ "description": "The name of the environment. Format: `projects//locations//agents//environments/`.", "type": "string" }, + "testCasesConfig": { + "$ref": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig", + "description": "The test cases config for continuous tests of this environment." + }, "updateTime": { "description": "Output only. Update time of this environment.", "format": "google-datetime", @@ -5766,6 +6067,28 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig": { + "description": "The configuration for continuous tests.", + "id": "GoogleCloudDialogflowCxV3beta1EnvironmentTestCasesConfig", + "properties": { + "enableContinuousRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases periodically. Default false. If set to true, run once a day.", + "type": "boolean" + }, + "enablePredeploymentRun": { + "description": "Whether to run test cases in TestCasesConfig.test_cases before deploying a flow version to the environment. Default false.", + "type": "boolean" + }, + "testCases": { + "description": "A list of test case names to run. They should be under the same agent.
Format of each test case name: `projects//locations/ /agents//testCases/`", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig": { "description": "Configuration for the version.", "id": "GoogleCloudDialogflowCxV3beta1EnvironmentVersionConfig", @@ -6834,6 +7157,24 @@ }, "type": "object" }, + "GoogleCloudDialogflowCxV3beta1ListDeploymentsResponse": { + "description": "The response message for Deployments.ListDeployments.", + "id": "GoogleCloudDialogflowCxV3beta1ListDeploymentsResponse", + "properties": { + "deployments": { + "description": "The list of deployments. There will be a maximum number of items returned based on the page_size field in the request. The list may in some cases be empty or contain fewer entries than page_size even if this isn't the last page.", + "items": { + "$ref": "GoogleCloudDialogflowCxV3beta1Deployment" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more results in the list.", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudDialogflowCxV3beta1ListEntityTypesResponse": { "description": "The response message for EntityTypes.ListEntityTypes.", "id": "GoogleCloudDialogflowCxV3beta1ListEntityTypesResponse", @@ -7943,7 +8284,7 @@ "type": "string" }, "name": { - "description": "Required. Resource name of the settings. Format: `projects//locations//securitySettings/`.", + "description": "Resource name of the settings. Required for the SecuritySettingsService.UpdateSecuritySettings method. SecuritySettingsService.CreateSecuritySettings populates the name automatically. Format: `projects//locations//securitySettings/`.", "type": "string" }, "purgeDataTypes": { diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v1.json b/googleapiclient/discovery_cache/documents/displayvideo.v1.json index cb4d1014080..800b12bd30a 100644 --- a/googleapiclient/discovery_cache/documents/displayvideo.v1.json +++ b/googleapiclient/discovery_cache/documents/displayvideo.v1.json @@ -7317,7 +7317,7 @@ } } }, - "revision": "20210920", + "revision": "20210923", "rootUrl": "https://displayvideo.googleapis.com/", "schemas": { "ActivateManualTriggerRequest": { diff --git a/googleapiclient/discovery_cache/documents/dlp.v2.json b/googleapiclient/discovery_cache/documents/dlp.v2.json index be1cda3e463..302254c48fe 100644 --- a/googleapiclient/discovery_cache/documents/dlp.v2.json +++ b/googleapiclient/discovery_cache/documents/dlp.v2.json @@ -3412,7 +3412,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://dlp.googleapis.com/", "schemas": { "GooglePrivacyDlpV2Action": { diff --git a/googleapiclient/discovery_cache/documents/dns.v1.json b/googleapiclient/discovery_cache/documents/dns.v1.json index 11c88daacc9..ebd42260cea 100644 --- a/googleapiclient/discovery_cache/documents/dns.v1.json +++ b/googleapiclient/discovery_cache/documents/dns.v1.json @@ -1235,7 +1235,7 @@ } } }, - "revision": "20210907", + "revision": "20210914", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { diff --git a/googleapiclient/discovery_cache/documents/dns.v1beta2.json b/googleapiclient/discovery_cache/documents/dns.v1beta2.json index e4ed232f2fa..2255a62f926 100644 --- a/googleapiclient/discovery_cache/documents/dns.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/dns.v1beta2.json @@ -1730,7 +1730,7 @@ } } }, - "revision": "20210907", + "revision": 
"20210914", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { diff --git a/googleapiclient/discovery_cache/documents/docs.v1.json b/googleapiclient/discovery_cache/documents/docs.v1.json index fcc108f2fa4..3020bffc029 100644 --- a/googleapiclient/discovery_cache/documents/docs.v1.json +++ b/googleapiclient/discovery_cache/documents/docs.v1.json @@ -216,7 +216,7 @@ } } }, - "revision": "20210915", + "revision": "20210922", "rootUrl": "https://docs.googleapis.com/", "schemas": { "AutoText": { diff --git a/googleapiclient/discovery_cache/documents/documentai.v1.json b/googleapiclient/discovery_cache/documents/documentai.v1.json index b2db14531c6..b8459673071 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1.json @@ -1029,7 +1029,7 @@ } } }, - "revision": "20210917", + "revision": "20210926", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3BatchDeleteDocumentsMetadata": { @@ -3145,7 +3145,7 @@ "id": "GoogleCloudDocumentaiV1SchemaEntityType", "properties": { "baseType": { - "description": "Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address.", + "description": "Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration.", "type": "string" }, "description": { diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta2.json b/googleapiclient/discovery_cache/documents/documentai.v1beta2.json index bff9e122324..455d309aa29 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1beta2.json @@ -292,7 +292,7 @@ } } }, - "revision": "20210917", + "revision": "20210926", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3BatchDeleteDocumentsMetadata": { diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json index 9de9de9d97a..526a19c37ca 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json @@ -796,7 +796,7 @@ } } }, - "revision": "20210917", + "revision": "20210926", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3BatchDeleteDocumentsMetadata": { @@ -5621,7 +5621,7 @@ "id": "GoogleCloudDocumentaiV1beta3SchemaEntityType", "properties": { "baseType": { - "description": "Type of the entity. 
It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address.", + "description": "Type of the entity. It must be one of the following: `document` - the entity represents a classification of a logical document. `object` - if the entity has properties it is likely an object (or or a document.) `datetime` - the entity is a date or time value. `money` - the entity represents a money value amount. `number` - the entity is a number - integer or floating point. `string` - the entity is a string value. `boolean` - the entity is a boolean value. `address` - the entity is a location address. `duration` - the entity is a duration.", "type": "string" }, "description": { diff --git a/googleapiclient/discovery_cache/documents/domainsrdap.v1.json b/googleapiclient/discovery_cache/documents/domainsrdap.v1.json index 34f49e627a9..058f7fabd24 100644 --- a/googleapiclient/discovery_cache/documents/domainsrdap.v1.json +++ b/googleapiclient/discovery_cache/documents/domainsrdap.v1.json @@ -289,7 +289,7 @@ } } }, - "revision": "20210920", + "revision": "20210925", "rootUrl": "https://domainsrdap.googleapis.com/", "schemas": { "HttpBody": { diff --git a/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.1.json b/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.1.json index 499c794be12..a3538ef1dd7 100644 --- a/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.1.json +++ b/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.1.json @@ -280,7 +280,7 @@ } } }, - "revision": "20210824", + "revision": "20210920", "rootUrl": "https://doubleclickbidmanager.googleapis.com/", "schemas": { "ChannelGrouping": { @@ -618,7 +618,15 @@ "FILTER_INSERTION_ORDER_GOAL_VALUE", "FILTER_OMID_CAPABLE", "FILTER_VENDOR_MEASUREMENT_MODE", - "FILTER_IMPRESSION_LOSS_REJECTION_REASON" + "FILTER_IMPRESSION_LOSS_REJECTION_REASON", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_START", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_FIRST_QUARTILE", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_MID_POINT", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_THIRD_QUARTILE", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_COMPLETE", + "FILTER_VERIFICATION_VIDEO_RESIZED", + "FILTER_VERIFICATION_AUDIBILITY_START", + "FILTER_VERIFICATION_AUDIBILITY_COMPLETE" ], "enumDescriptions": [ "", @@ -902,6 +910,14 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", "" ], "type": "string" @@ -1268,7 +1284,15 @@ "FILTER_INSERTION_ORDER_GOAL_VALUE", "FILTER_OMID_CAPABLE", "FILTER_VENDOR_MEASUREMENT_MODE", - "FILTER_IMPRESSION_LOSS_REJECTION_REASON" + "FILTER_IMPRESSION_LOSS_REJECTION_REASON", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_START", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_FIRST_QUARTILE", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_MID_POINT", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_THIRD_QUARTILE", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_COMPLETE", + "FILTER_VERIFICATION_VIDEO_RESIZED", + "FILTER_VERIFICATION_AUDIBILITY_START", + "FILTER_VERIFICATION_AUDIBILITY_COMPLETE" ], "enumDescriptions": [ "", @@ -1552,6 +1576,14 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", "" ], "type": 
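To make the new `FILTER_VERIFICATION_*` values concrete, here is a hedged, untested sketch of a Bid Manager v1.1 report query that groups by one of them. The `queries.createquery` body shape (metadata/params) is assumed from the pre-existing Query schema, not from this diff, and the advertiser ID is a placeholder:

```python
# Hedged sketch: use a new verification filter as a groupBy in a report query.
from googleapiclient.discovery import build

service = build("doubleclickbidmanager", "v1.1")

query_body = {  # assumed shape of the pre-existing Query resource
    "metadata": {
        "title": "Audibility by day",   # hypothetical report title
        "dataRange": "LAST_7_DAYS",
        "format": "CSV",
    },
    "params": {
        "type": "TYPE_GENERAL",
        "filters": [{"type": "FILTER_ADVERTISER", "value": "123456"}],  # placeholder ID
        # FILTER_VERIFICATION_AUDIBILITY_START is one of the values added
        # in revision 20210920.
        "groupBys": ["FILTER_DATE", "FILTER_VERIFICATION_AUDIBILITY_START"],
        "metrics": ["METRIC_IMPRESSIONS"],
    },
}

query = service.queries().createquery(body=query_body).execute()
print(query.get("queryId"))
```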
"string" @@ -2032,7 +2064,8 @@ "METRIC_WIN_LOSS_DEAL_AVAILABLE_REQUESTS", "METRIC_WIN_LOSS_LINEITEM_AVAILABLE_REQUESTS", "METRIC_WIN_LOSS_DEAL_TARGETED_IMPRESSIONS", - "METRIC_WIN_LOSS_LINEITEM_TARGETED_IMPRESSIONS" + "METRIC_WIN_LOSS_LINEITEM_TARGETED_IMPRESSIONS", + "METRIC_VERIFICATION_VIDEO_PLAYER_SIZE_MEASURABLE_IMPRESSIONS" ], "enumDescriptions": [ "", @@ -2501,6 +2534,7 @@ "", "", "", + "", "" ], "type": "string" @@ -2924,7 +2958,15 @@ "FILTER_INSERTION_ORDER_GOAL_VALUE", "FILTER_OMID_CAPABLE", "FILTER_VENDOR_MEASUREMENT_MODE", - "FILTER_IMPRESSION_LOSS_REJECTION_REASON" + "FILTER_IMPRESSION_LOSS_REJECTION_REASON", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_START", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_FIRST_QUARTILE", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_MID_POINT", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_THIRD_QUARTILE", + "FILTER_VERIFICATION_VIDEO_PLAYER_SIZE_COMPLETE", + "FILTER_VERIFICATION_VIDEO_RESIZED", + "FILTER_VERIFICATION_AUDIBILITY_START", + "FILTER_VERIFICATION_AUDIBILITY_COMPLETE" ], "enumDescriptions": [ "", @@ -3208,6 +3250,14 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", "" ], "type": "string" diff --git a/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.json b/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.json index fd1da51fc91..70e11f9a9a6 100644 --- a/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.json +++ b/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v1.json @@ -96,7 +96,7 @@ }, "protocol": "rest", "resources": {}, - "revision": "20210824", + "revision": "20210920", "rootUrl": "https://doubleclickbidmanager.googleapis.com/", "schemas": {}, "servicePath": "doubleclickbidmanager/v1/", diff --git a/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json b/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json index 77f2b99a9c3..ed8de2c3f50 100644 --- a/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json +++ b/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json @@ -399,7 +399,7 @@ } } }, - "revision": "20210914", + "revision": "20210921", "rootUrl": "https://doubleclicksearch.googleapis.com/", "schemas": { "Availability": { diff --git a/googleapiclient/discovery_cache/documents/drive.v2.json b/googleapiclient/discovery_cache/documents/drive.v2.json index 6eda1b5fcd4..4a4c8a8b9f0 100644 --- a/googleapiclient/discovery_cache/documents/drive.v2.json +++ b/googleapiclient/discovery_cache/documents/drive.v2.json @@ -38,7 +38,7 @@ "description": "Manages files in Drive including uploading, downloading, searching, detecting changes, and updating sharing permissions.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/drive/", - "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/Vu4poje-2dWgpys_T9FZKaW40Qk\"", + "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/1zEdrQsJh-Ob8KbDsrp0vIynqK0\"", "icons": { "x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png", "x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png" @@ -1659,7 +1659,7 @@ ] }, "patch": { - "description": "Updates file metadata and/or content. This method supports patch semantics.", + "description": "Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might might change automatically, such as modifiedDate. 
This method supports patch semantics.", "httpMethod": "PATCH", "id": "drive.files.patch", "parameterOrder": [ @@ -1931,7 +1931,7 @@ ] }, "update": { - "description": "Updates file metadata and/or content.", + "description": "Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might be changed automatically, such as modifiedDate. This method supports patch semantics.", "httpMethod": "PUT", "id": "drive.files.update", "mediaUpload": { @@ -3527,7 +3527,7 @@ } } }, - "revision": "20210912", + "revision": "20210921", "rootUrl": "https://www.googleapis.com/", "schemas": { "About": { diff --git a/googleapiclient/discovery_cache/documents/drive.v3.json b/googleapiclient/discovery_cache/documents/drive.v3.json index db01e31ca9f..52e62fa57e3 100644 --- a/googleapiclient/discovery_cache/documents/drive.v3.json +++ b/googleapiclient/discovery_cache/documents/drive.v3.json @@ -35,7 +35,7 @@ "description": "Manages files in Drive including uploading, downloading, searching, detecting changes, and updating sharing permissions.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/drive/", - "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/FI7fSrKiR16OF44lPfi5QIDdyps\"", + "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/W19uzic4WgAV-31T2Pfs85esB2s\"", "icons": { "x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png", "x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png" @@ -1211,7 +1211,7 @@ ] }, "update": { - "description": "Updates a file's metadata and/or content. This method supports patch semantics.", + "description": "Updates a file's metadata and/or content. When calling this method, only populate fields in the request that you want to modify. When updating fields, some fields might change automatically, such as modifiedDate. 
This method supports patch semantics.", "httpMethod": "PATCH", "id": "drive.files.update", "mediaUpload": { @@ -2191,7 +2191,7 @@ } } }, - "revision": "20210912", + "revision": "20210921", "rootUrl": "https://www.googleapis.com/", "schemas": { "About": { diff --git a/googleapiclient/discovery_cache/documents/driveactivity.v2.json b/googleapiclient/discovery_cache/documents/driveactivity.v2.json index 8499a9eede0..e59794d3e01 100644 --- a/googleapiclient/discovery_cache/documents/driveactivity.v2.json +++ b/googleapiclient/discovery_cache/documents/driveactivity.v2.json @@ -132,7 +132,7 @@ } } }, - "revision": "20210910", + "revision": "20210922", "rootUrl": "https://driveactivity.googleapis.com/", "schemas": { "Action": { diff --git a/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json b/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json index 1294882af48..5b43f83320c 100644 --- a/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json +++ b/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json @@ -850,7 +850,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://essentialcontacts.googleapis.com/", "schemas": { "GoogleCloudEssentialcontactsV1ComputeContactsResponse": { diff --git a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json index dcbdd44884e..1acd6d0dfd0 100644 --- a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json @@ -304,7 +304,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://factchecktools.googleapis.com/", "schemas": { "GoogleFactcheckingFactchecktoolsV1alpha1Claim": { diff --git a/googleapiclient/discovery_cache/documents/fcm.v1.json b/googleapiclient/discovery_cache/documents/fcm.v1.json index b743964e440..6cd78d955f4 100644 --- a/googleapiclient/discovery_cache/documents/fcm.v1.json +++ b/googleapiclient/discovery_cache/documents/fcm.v1.json @@ -146,7 +146,7 @@ } } }, - "revision": "20210913", + "revision": "20210920", "rootUrl": "https://fcm.googleapis.com/", "schemas": { "AndroidConfig": { diff --git a/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json b/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json index 4d3c9497f55..59feaf4c505 100644 --- a/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json @@ -154,7 +154,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://fcmdata.googleapis.com/", "schemas": { "GoogleFirebaseFcmDataV1beta1AndroidDeliveryData": { diff --git a/googleapiclient/discovery_cache/documents/firebase.v1beta1.json b/googleapiclient/discovery_cache/documents/firebase.v1beta1.json index 2508c2e4f86..6ff19da337e 100644 --- a/googleapiclient/discovery_cache/documents/firebase.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/firebase.v1beta1.json @@ -1121,7 +1121,7 @@ } } }, - "revision": "20210917", + "revision": "20210924", "rootUrl": "https://firebase.googleapis.com/", "schemas": { "AddFirebaseRequest": { diff --git a/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json b/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json index 8244fa8995f..bebc6bcc1d3 100644 --- a/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json +++ 
b/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json @@ -1057,7 +1057,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://firebaseappcheck.googleapis.com/", "schemas": { "GoogleFirebaseAppcheckV1betaAppAttestChallengeResponse": { diff --git a/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json b/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json index 0a15664098d..fac1479a2e5 100644 --- a/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json +++ b/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json @@ -317,7 +317,7 @@ } } }, - "revision": "20210917", + "revision": "20210924", "rootUrl": "https://firebasedatabase.googleapis.com/", "schemas": { "DatabaseInstance": { diff --git a/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json b/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json index 893b1beca7e..727c518cbdd 100644 --- a/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json +++ b/googleapiclient/discovery_cache/documents/firebasedynamiclinks.v1.json @@ -224,7 +224,7 @@ } } }, - "revision": "20210913", + "revision": "20210920", "rootUrl": "https://firebasedynamiclinks.googleapis.com/", "schemas": { "AnalyticsInfo": { diff --git a/googleapiclient/discovery_cache/documents/firebasehosting.v1.json b/googleapiclient/discovery_cache/documents/firebasehosting.v1.json index ddb7468eba3..c473f78cfa8 100644 --- a/googleapiclient/discovery_cache/documents/firebasehosting.v1.json +++ b/googleapiclient/discovery_cache/documents/firebasehosting.v1.json @@ -186,7 +186,7 @@ } } }, - "revision": "20210818", + "revision": "20210921", "rootUrl": "https://firebasehosting.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json b/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json index cdafb2e04fa..3eae5f50a5c 100644 --- a/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json @@ -1939,7 +1939,7 @@ } } }, - "revision": "20210818", + "revision": "20210921", "rootUrl": "https://firebasehosting.googleapis.com/", "schemas": { "ActingUser": { diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v1.json b/googleapiclient/discovery_cache/documents/firebaseml.v1.json index 2fea2f5f5b9..fec8b588a7c 100644 --- a/googleapiclient/discovery_cache/documents/firebaseml.v1.json +++ b/googleapiclient/discovery_cache/documents/firebaseml.v1.json @@ -204,7 +204,7 @@ } } }, - "revision": "20210914", + "revision": "20210922", "rootUrl": "https://firebaseml.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json b/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json index 1bb3b8faa13..e59cff71b3c 100644 --- a/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json @@ -318,7 +318,7 @@ } } }, - "revision": "20210914", + "revision": "20210922", "rootUrl": "https://firebaseml.googleapis.com/", "schemas": { "DownloadModelResponse": { diff --git a/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json b/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json index 9d40b4b3785..699f3e474ee 100644 --- 
a/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json +++ b/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json @@ -238,7 +238,7 @@ } } }, - "revision": "20210823", + "revision": "20210917", "rootUrl": "https://firebasestorage.googleapis.com/", "schemas": { "AddFirebaseRequest": { @@ -301,7 +301,8 @@ "DELETING_TEMP_BUCKET", "SUCCEEDED", "FAILED", - "ROLLING_BACK" + "ROLLING_BACK", + "ROLLED_BACK" ], "enumDescriptions": [ "Unspecified state. Should not be used.", @@ -313,8 +314,9 @@ "The second STS transfer to move all objects from the temp bucket to the final bucket is underway.", "The temp bucket is being emptied and deleted.", "All stages of the migration have completed and the operation has been marked done and updated with a response.", - "The migration failed at some stage and it is not possible to continue retrying that stage. Manual recovery may be needed.", - "The migration has encountered a permanent failure and is now being rolled back so that the source bucket is restored to its original state." + "The migration failed at some stage and it is not possible to continue retrying that stage. Manual recovery may be needed. Rollback is either impossible at this stage, or has been attempted and failed.", + "The migration has encountered a permanent failure and is now being rolled back so that the source bucket is restored to its original state.", + "The migration has been successfully rolled back." ], "type": "string" } @@ -348,7 +350,8 @@ "DELETING_TEMP_BUCKET", "SUCCEEDED", "FAILED", - "ROLLING_BACK" + "ROLLING_BACK", + "ROLLED_BACK" ], "enumDescriptions": [ "Unspecified state. Should not be used.", @@ -360,8 +363,9 @@ "The second STS transfer to move all objects from the temp bucket to the final bucket is underway.", "The temp bucket is being emptied and deleted.", "All stages of the migration have completed and the operation has been marked done and updated with a response.", - "The migration failed at some stage and it is not possible to continue retrying that stage. Manual recovery may be needed.", - "The migration has encountered a permanent failure and is now being rolled back so that the source bucket is restored to its original state." + "The migration failed at some stage and it is not possible to continue retrying that stage. Manual recovery may be needed. Rollback is either impossible at this stage, or has been attempted and failed.", + "The migration has encountered a permanent failure and is now being rolled back so that the source bucket is restored to its original state.", + "The migration has been successfully rolled back." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/firestore.v1.json b/googleapiclient/discovery_cache/documents/firestore.v1.json index ad548e6961b..ea7004a89df 100644 --- a/googleapiclient/discovery_cache/documents/firestore.v1.json +++ b/googleapiclient/discovery_cache/documents/firestore.v1.json @@ -141,6 +141,32 @@ "https://www.googleapis.com/auth/datastore" ] }, + "get": { + "description": "Gets information about a database.", + "flatPath": "v1/projects/{projectsId}/databases/{databasesId}", + "httpMethod": "GET", + "id": "firestore.projects.databases.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
A name of the form `projects/{project_id}/databases/{database_id}`", + "location": "path", + "pattern": "^projects/[^/]+/databases/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "GoogleFirestoreAdminV1Database" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/datastore" + ] + }, "importDocuments": { "description": "Imports documents into Google Cloud Firestore. Existing documents with the same name are overwritten. The import occurs in the background and its progress can be monitored and managed via the Operation resource that is created. If an ImportDocuments operation is cancelled, it is possible that a subset of the data has already been imported to Cloud Firestore.", "flatPath": "v1/projects/{projectsId}/databases/{databasesId}:importDocuments", @@ -169,6 +195,67 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/datastore" ] + }, + "list": { + "description": "List all the databases in the project.", + "flatPath": "v1/projects/{projectsId}/databases", + "httpMethod": "GET", + "id": "firestore.projects.databases.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. A parent name of the form `projects/{project_id}`", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/databases", + "response": { + "$ref": "GoogleFirestoreAdminV1ListDatabasesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/datastore" + ] + }, + "patch": { + "description": "Updates a database.", + "flatPath": "v1/projects/{projectsId}/databases/{databasesId}", + "httpMethod": "PATCH", + "id": "firestore.projects.databases.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The resource name of the Database. Format: `projects/{project}/databases/{database}`", + "location": "path", + "pattern": "^projects/[^/]+/databases/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "The list of fields to be updated.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "GoogleFirestoreAdminV1Database" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/datastore" + ] } }, "resources": { @@ -1160,7 +1247,7 @@ } } }, - "revision": "20210901", + "revision": "20210916", "rootUrl": "https://firestore.googleapis.com/", "schemas": { "ArrayValue": { @@ -1685,6 +1772,55 @@ }, "type": "object" }, + "GoogleFirestoreAdminV1Database": { + "description": "A Cloud Firestore Database in Native Mode. Currently one database is allowed per cloud project. It is named '(default)'", + "id": "GoogleFirestoreAdminV1Database", + "properties": { + "concurrencyMode": { + "description": "The concurrency control mode to use for this database.", + "enum": [ + "CONCURRENCY_MODE_UNSPECIFIED", + "OPTIMISTIC", + "PESSIMISTIC", + "OPTIMISTIC_WITH_ENTITY_GROUPS" + ], + "enumDescriptions": [ + "Not used.", + "Use optimistic concurrency control by default. This is the setting for Cloud Firestore customers.", + "Use pessimistic concurrency control by default. 
This is the setting for Cloud Firestore customers.", + "Use optimistic concurrency control with entity groups by default. This is the setting for Cloud Datastore customers." + ], + "type": "string" + }, + "etag": { + "description": "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + "type": "string" + }, + "locationId": { + "description": "The location of the database. Available databases are listed at https://cloud.google.com/firestore/docs/locations.", + "type": "string" + }, + "name": { + "description": "The resource name of the Database. Format: `projects/{project}/databases/{database}`", + "type": "string" + }, + "type": { + "description": "The type of the database. See https://cloud.google.com/datastore/docs/firestore-or-datastore for information about how to choose.", + "enum": [ + "DATABASE_TYPE_UNSPECIFIED", + "FIRESTORE_NATIVE", + "DATASTORE_MODE" + ], + "enumDescriptions": [ + "The default value. This value is used if the database type is omitted.", + "Firestore Native Mode", + "Firestore in Datastore Mode." + ], + "type": "string" + } + }, + "type": "object" + }, "GoogleFirestoreAdminV1ExportDocumentsMetadata": { "description": "Metadata for google.longrunning.Operation results from FirestoreAdmin.ExportDocuments.", "id": "GoogleFirestoreAdminV1ExportDocumentsMetadata", @@ -2116,6 +2252,20 @@ }, "type": "object" }, + "GoogleFirestoreAdminV1ListDatabasesResponse": { + "description": "The list of databases for a project.", + "id": "GoogleFirestoreAdminV1ListDatabasesResponse", + "properties": { + "databases": { + "description": "The databases in the project.", + "items": { + "$ref": "GoogleFirestoreAdminV1Database" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleFirestoreAdminV1ListFieldsResponse": { "description": "The response for FirestoreAdmin.ListFields.", "id": "GoogleFirestoreAdminV1ListFieldsResponse", diff --git a/googleapiclient/discovery_cache/documents/firestore.v1beta1.json b/googleapiclient/discovery_cache/documents/firestore.v1beta1.json index 2fdc27b0984..265480dbf0d 100644 --- a/googleapiclient/discovery_cache/documents/firestore.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/firestore.v1beta1.json @@ -849,7 +849,7 @@ } } }, - "revision": "20210901", + "revision": "20210916", "rootUrl": "https://firestore.googleapis.com/", "schemas": { "ArrayValue": { diff --git a/googleapiclient/discovery_cache/documents/firestore.v1beta2.json b/googleapiclient/discovery_cache/documents/firestore.v1beta2.json index ecdaa94cf5a..4cb3bbd20c3 100644 --- a/googleapiclient/discovery_cache/documents/firestore.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/firestore.v1beta2.json @@ -415,7 +415,7 @@ } } }, - "revision": "20210901", + "revision": "20210916", "rootUrl": "https://firestore.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/fitness.v1.json b/googleapiclient/discovery_cache/documents/fitness.v1.json index 8b7cf46d0d3..e69540c186b 100644 --- a/googleapiclient/discovery_cache/documents/fitness.v1.json +++ b/googleapiclient/discovery_cache/documents/fitness.v1.json @@ -831,7 +831,7 @@ } } }, - "revision": "20210920", + "revision": "20210922", "rootUrl": "https://fitness.googleapis.com/", "schemas": { "AggregateBucket": { @@ -937,7 +937,7 @@ "fitness.users.dataset.aggregate" ] }, - "description": "The end of a window of time. 
Data that intersects with this time window will be aggregated. The time is in milliseconds since epoch, inclusive.", + "description": "The end of a window of time. Data that intersects with this time window will be aggregated. The time is in milliseconds since epoch, inclusive. The maximum allowed difference between start_time_millis and end_time_millis is 7776000000 (roughly 90 days).", "format": "int64", "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/games.v1.json b/googleapiclient/discovery_cache/documents/games.v1.json index cdc18f1d804..4ec4c045105 100644 --- a/googleapiclient/discovery_cache/documents/games.v1.json +++ b/googleapiclient/discovery_cache/documents/games.v1.json @@ -1224,7 +1224,7 @@ } } }, - "revision": "20210909", + "revision": "20210922", "rootUrl": "https://games.googleapis.com/", "schemas": { "AchievementDefinition": { diff --git a/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json b/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json index 4e0fee57995..b3c428a088b 100644 --- a/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json +++ b/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json @@ -439,7 +439,7 @@ } } }, - "revision": "20210909", + "revision": "20210922", "rootUrl": "https://gamesconfiguration.googleapis.com/", "schemas": { "AchievementConfiguration": { diff --git a/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json b/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json index d9dd0efd034..4b4839f353b 100644 --- a/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json +++ b/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json @@ -471,7 +471,7 @@ } } }, - "revision": "20210909", + "revision": "20210922", "rootUrl": "https://gamesmanagement.googleapis.com/", "schemas": { "AchievementResetAllResponse": { diff --git a/googleapiclient/discovery_cache/documents/gameservices.v1.json b/googleapiclient/discovery_cache/documents/gameservices.v1.json index 811a5d33ed1..14cae4b9252 100644 --- a/googleapiclient/discovery_cache/documents/gameservices.v1.json +++ b/googleapiclient/discovery_cache/documents/gameservices.v1.json @@ -1357,7 +1357,7 @@ } } }, - "revision": "20210907", + "revision": "20210915", "rootUrl": "https://gameservices.googleapis.com/", "schemas": { "AuditConfig": { @@ -2631,7 +2631,7 @@ "type": "array" }, "logConfig": { - "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action.", + "description": "The config returned to callers of CheckPolicy for any entries that match the LOG action.", "items": { "$ref": "LogConfig" }, diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1.json b/googleapiclient/discovery_cache/documents/gkehub.v1.json index 6afe8e081a1..e5325101e0e 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1.json @@ -905,7 +905,7 @@ } } }, - "revision": "20210910", + "revision": "20210920", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AuditConfig": { @@ -1041,6 +1041,13 @@ "$ref": "ConfigManagementGitConfig", "description": "Git repo configuration for the cluster."
}, + "resourceRequirements": { + "additionalProperties": { + "$ref": "ConfigManagementContainerResourceRequirements" + }, + "description": "Specifies CPU and memory limits for containers, keyed by container name", + "type": "object" + }, "sourceFormat": { "description": "Specifies whether the Config Sync Repo is in \u201chierarchical\u201d or \u201cunstructured\u201d mode.", "type": "string" @@ -1052,6 +1059,22 @@ "description": "The state of ConfigSync's deployment on a cluster", "id": "ConfigManagementConfigSyncDeploymentState", "properties": { + "admissionWebhook": { + "description": "Deployment state of admission-webhook", + "enum": [ + "DEPLOYMENT_STATE_UNSPECIFIED", + "NOT_INSTALLED", + "INSTALLED", + "ERROR" + ], + "enumDescriptions": [ + "Deployment's state cannot be determined", + "Deployment is not installed", + "Deployment is installed", + "Deployment was attempted to be installed, but has errors" + ], + "type": "string" + }, "gitSync": { "description": "Deployment state of the git-sync pod", "enum": [ @@ -1174,6 +1197,10 @@ "description": "Specific versioning information pertaining to ConfigSync's Pods", "id": "ConfigManagementConfigSyncVersion", "properties": { + "admissionWebhook": { + "description": "Version of the deployed admission_webhook pod", + "type": "string" + }, "gitSync": { "description": "Version of the deployed git-sync pod", "type": "string" @@ -1201,6 +1228,25 @@ }, "type": "object" }, + "ConfigManagementContainerResourceRequirements": { + "description": "ResourceRequirements allows to override the CPU and memory resource requirements of a container.", + "id": "ConfigManagementContainerResourceRequirements", + "properties": { + "containerName": { + "description": "Name of the container", + "type": "string" + }, + "cpuLimit": { + "$ref": "ConfigManagementQuantity", + "description": "Allows to override the CPU limit of a container" + }, + "memoryLimit": { + "$ref": "ConfigManagementQuantity", + "description": "Allows to override the memory limit of a container" + } + }, + "type": "object" + }, "ConfigManagementErrorResource": { "description": "Model for a config file in the git repo with an associated Sync error", "id": "ConfigManagementErrorResource", @@ -1275,6 +1321,10 @@ "description": "URL for the HTTPS proxy to be used when communicating with the Git repo.", "type": "string" }, + "noSslVerify": { + "description": "Enable or disable the SSL certificate verification Default: false.", + "type": "boolean" + }, "policyDir": { "description": "The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.", "type": "string" @@ -1287,6 +1337,11 @@ "description": "The branch of the repository to sync from. Default: master.", "type": "string" }, + "syncDepth": { + "description": "The depth of git commits synced by the git-sync container.", + "format": "int64", + "type": "string" + }, "syncRepo": { "description": "The URL of the Git repository to use as the source of truth.", "type": "string" @@ -1570,6 +1625,17 @@ }, "type": "object" }, + "ConfigManagementQuantity": { + "description": "The view model of a single quantity, e.g. \"800 MiB\". 
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto", + "id": "ConfigManagementQuantity", + "properties": { + "string": { + "description": "Stringified version of the quantity, e.g., \"800 MiB\".", + "type": "string" + } + }, + "type": "object" + }, "ConfigManagementSyncError": { "description": "An ACM created error representing a problem syncing configurations", "id": "ConfigManagementSyncError", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json index e34ba328902..d4ad8eec0bd 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json @@ -670,7 +670,7 @@ } } }, - "revision": "20210910", + "revision": "20210920", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AuditConfig": { @@ -851,6 +851,13 @@ "$ref": "ConfigManagementGitConfig", "description": "Git repo configuration for the cluster." }, + "resourceRequirements": { + "additionalProperties": { + "$ref": "ConfigManagementContainerResourceRequirements" + }, + "description": "Specifies CPU and memory limits for containers, keyed by container name", + "type": "object" + }, "sourceFormat": { "description": "Specifies whether the Config Sync Repo is in \u201chierarchical\u201d or \u201cunstructured\u201d mode.", "type": "string" @@ -862,6 +869,22 @@ "description": "The state of ConfigSync's deployment on a cluster", "id": "ConfigManagementConfigSyncDeploymentState", "properties": { + "admissionWebhook": { + "description": "Deployment state of admission-webhook", + "enum": [ + "DEPLOYMENT_STATE_UNSPECIFIED", + "NOT_INSTALLED", + "INSTALLED", + "ERROR" + ], + "enumDescriptions": [ + "Deployment's state cannot be determined", + "Deployment is not installed", + "Deployment is installed", + "Deployment was attempted to be installed, but has errors" + ], + "type": "string" + }, "gitSync": { "description": "Deployment state of the git-sync pod", "enum": [ @@ -984,6 +1007,10 @@ "description": "Specific versioning information pertaining to ConfigSync's Pods", "id": "ConfigManagementConfigSyncVersion", "properties": { + "admissionWebhook": { + "description": "Version of the deployed admission_webhook pod", + "type": "string" + }, "gitSync": { "description": "Version of the deployed git-sync pod", "type": "string" @@ -1011,6 +1038,25 @@ }, "type": "object" }, + "ConfigManagementContainerResourceRequirements": { + "description": "ResourceRequirements allows to override the CPU and memory resource requirements of a container.", + "id": "ConfigManagementContainerResourceRequirements", + "properties": { + "containerName": { + "description": "Name of the container", + "type": "string" + }, + "cpuLimit": { + "$ref": "ConfigManagementQuantity", + "description": "Allows to override the CPU limit of a container" + }, + "memoryLimit": { + "$ref": "ConfigManagementQuantity", + "description": "Allows to override the memory limit of a container" + } + }, + "type": "object" + }, "ConfigManagementErrorResource": { "description": "Model for a config file in the git repo with an associated Sync error", "id": "ConfigManagementErrorResource", @@ -1101,6 +1147,10 @@ "description": "URL for the HTTPS proxy to be used when communicating with the Git repo.", "type": "string" }, + "noSslVerify": { + "description": "Enable or disable the SSL certificate verification Default: false.", + "type": "boolean" + }, 
"policyDir": { "description": "The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.", "type": "string" @@ -1113,6 +1163,11 @@ "description": "The branch of the repository to sync from. Default: master.", "type": "string" }, + "syncDepth": { + "description": "The depth of git commits synced by the git-sync container.", + "format": "int64", + "type": "string" + }, "syncRepo": { "description": "The URL of the Git repository to use as the source of truth.", "type": "string" @@ -1408,6 +1463,17 @@ }, "type": "object" }, + "ConfigManagementQuantity": { + "description": "The view model of a single quantity, e.g. \"800 MiB\". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto", + "id": "ConfigManagementQuantity", + "properties": { + "string": { + "description": "Stringified version of the quantity, e.g., \"800 MiB\".", + "type": "string" + } + }, + "type": "object" + }, "ConfigManagementSyncError": { "description": "An ACM created error representing a problem syncing configurations", "id": "ConfigManagementSyncError", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json index 2af97cb5c1e..162f0188ffd 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json @@ -652,7 +652,7 @@ } } }, - "revision": "20210910", + "revision": "20210920", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json index 3bd2578180a..d55114799b1 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json @@ -670,7 +670,7 @@ } } }, - "revision": "20210910", + "revision": "20210920", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AuditConfig": { @@ -829,6 +829,13 @@ "$ref": "ConfigManagementGitConfig", "description": "Git repo configuration for the cluster." 
}, + "resourceRequirements": { + "additionalProperties": { + "$ref": "ConfigManagementContainerResourceRequirements" + }, + "description": "Specifies CPU and memory limits for containers, keyed by container name", + "type": "object" + }, "sourceFormat": { "description": "Specifies whether the Config Sync Repo is in \u201chierarchical\u201d or \u201cunstructured\u201d mode.", "type": "string" @@ -840,6 +847,22 @@ "description": "The state of ConfigSync's deployment on a cluster", "id": "ConfigManagementConfigSyncDeploymentState", "properties": { + "admissionWebhook": { + "description": "Deployment state of admission-webhook", + "enum": [ + "DEPLOYMENT_STATE_UNSPECIFIED", + "NOT_INSTALLED", + "INSTALLED", + "ERROR" + ], + "enumDescriptions": [ + "Deployment's state cannot be determined", + "Deployment is not installed", + "Deployment is installed", + "Deployment was attempted to be installed, but has errors" + ], + "type": "string" + }, "gitSync": { "description": "Deployment state of the git-sync pod", "enum": [ @@ -962,6 +985,10 @@ "description": "Specific versioning information pertaining to ConfigSync's Pods", "id": "ConfigManagementConfigSyncVersion", "properties": { + "admissionWebhook": { + "description": "Version of the deployed admission_webhook pod", + "type": "string" + }, "gitSync": { "description": "Version of the deployed git-sync pod", "type": "string" @@ -989,6 +1016,25 @@ }, "type": "object" }, + "ConfigManagementContainerResourceRequirements": { + "description": "ResourceRequirements allows to override the CPU and memory resource requirements of a container.", + "id": "ConfigManagementContainerResourceRequirements", + "properties": { + "containerName": { + "description": "Name of the container", + "type": "string" + }, + "cpuLimit": { + "$ref": "ConfigManagementQuantity", + "description": "Allows to override the CPU limit of a container" + }, + "memoryLimit": { + "$ref": "ConfigManagementQuantity", + "description": "Allows to override the memory limit of a container" + } + }, + "type": "object" + }, "ConfigManagementErrorResource": { "description": "Model for a config file in the git repo with an associated Sync error", "id": "ConfigManagementErrorResource", @@ -1063,6 +1109,10 @@ "description": "URL for the HTTPS proxy to be used when communicating with the Git repo.", "type": "string" }, + "noSslVerify": { + "description": "Enable or disable the SSL certificate verification Default: false.", + "type": "boolean" + }, "policyDir": { "description": "The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.", "type": "string" @@ -1075,6 +1125,11 @@ "description": "The branch of the repository to sync from. Default: master.", "type": "string" }, + "syncDepth": { + "description": "The depth of git commits synced by the git-sync container.", + "format": "int64", + "type": "string" + }, "syncRepo": { "description": "The URL of the Git repository to use as the source of truth.", "type": "string" @@ -1366,6 +1421,17 @@ }, "type": "object" }, + "ConfigManagementQuantity": { + "description": "The view model of a single quantity, e.g. \"800 MiB\". 
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto", + "id": "ConfigManagementQuantity", + "properties": { + "string": { + "description": "Stringified version of the quantity, e.g., \"800 MiB\".", + "type": "string" + } + }, + "type": "object" + }, "ConfigManagementSyncError": { "description": "An ACM created error representing a problem syncing configurations", "id": "ConfigManagementSyncError", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json index 61a49957eb1..1f6fc6064bc 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json @@ -706,7 +706,7 @@ } } }, - "revision": "20210910", + "revision": "20210920", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/gmail.v1.json b/googleapiclient/discovery_cache/documents/gmail.v1.json index aab0f319206..26fe0c0c5a4 100644 --- a/googleapiclient/discovery_cache/documents/gmail.v1.json +++ b/googleapiclient/discovery_cache/documents/gmail.v1.json @@ -2682,7 +2682,7 @@ } } }, - "revision": "20210906", + "revision": "20210920", "rootUrl": "https://gmail.googleapis.com/", "schemas": { "AutoForwarding": { diff --git a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json index 89d69a47f70..7f318139c06 100644 --- a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json +++ b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json @@ -265,7 +265,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://gmailpostmastertools.googleapis.com/", "schemas": { "DeliveryError": { diff --git a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json index 61c9165c1ae..ff95b153286 100644 --- a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json @@ -265,7 +265,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://gmailpostmastertools.googleapis.com/", "schemas": { "DeliveryError": { diff --git a/googleapiclient/discovery_cache/documents/groupsmigration.v1.json b/googleapiclient/discovery_cache/documents/groupsmigration.v1.json index 6f103c1f7ea..20fccf86c5c 100644 --- a/googleapiclient/discovery_cache/documents/groupsmigration.v1.json +++ b/googleapiclient/discovery_cache/documents/groupsmigration.v1.json @@ -146,7 +146,7 @@ } } }, - "revision": "20210910", + "revision": "20210919", "rootUrl": "https://groupsmigration.googleapis.com/", "schemas": { "Groups": { diff --git a/googleapiclient/discovery_cache/documents/groupssettings.v1.json b/googleapiclient/discovery_cache/documents/groupssettings.v1.json index e62c4a6458d..7bfc551b551 100644 --- a/googleapiclient/discovery_cache/documents/groupssettings.v1.json +++ b/googleapiclient/discovery_cache/documents/groupssettings.v1.json @@ -152,7 +152,7 @@ } } }, - "revision": "20210914", + "revision": "20210921", "rootUrl": "https://www.googleapis.com/", "schemas": { "Groups": { diff --git a/googleapiclient/discovery_cache/documents/healthcare.v1.json b/googleapiclient/discovery_cache/documents/healthcare.v1.json index 
d45d11c7aa0..05eeefa487c 100644 --- a/googleapiclient/discovery_cache/documents/healthcare.v1.json +++ b/googleapiclient/discovery_cache/documents/healthcare.v1.json @@ -3956,7 +3956,7 @@ } } }, - "revision": "20210909", + "revision": "20210914", "rootUrl": "https://healthcare.googleapis.com/", "schemas": { "ActivateConsentRequest": { @@ -4071,7 +4071,7 @@ "id": "AttributeDefinition", "properties": { "allowedValues": { - "description": "Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.", + "description": "Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.", "items": { "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json b/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json index cf53cbcec61..64553b0c9fa 100644 --- a/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json @@ -4865,7 +4865,7 @@ } } }, - "revision": "20210909", + "revision": "20210914", "rootUrl": "https://healthcare.googleapis.com/", "schemas": { "ActivateConsentRequest": { @@ -5058,7 +5058,7 @@ "id": "AttributeDefinition", "properties": { "allowedValues": { - "description": "Required. Possible values for the attribute. The number of allowed values must not exceed 100. An empty list is invalid. The list can only be expanded after creation.", + "description": "Required. Possible values for the attribute. The number of allowed values must not exceed 500. An empty list is invalid. The list can only be expanded after creation.", "items": { "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/homegraph.v1.json b/googleapiclient/discovery_cache/documents/homegraph.v1.json index b40886f2db9..e6584f6b955 100644 --- a/googleapiclient/discovery_cache/documents/homegraph.v1.json +++ b/googleapiclient/discovery_cache/documents/homegraph.v1.json @@ -216,7 +216,7 @@ } } }, - "revision": "20210910", + "revision": "20210920", "rootUrl": "https://homegraph.googleapis.com/", "schemas": { "AgentDeviceId": { diff --git a/googleapiclient/discovery_cache/documents/iam.v1.json b/googleapiclient/discovery_cache/documents/iam.v1.json index dd69da408e1..edaea64b72f 100644 --- a/googleapiclient/discovery_cache/documents/iam.v1.json +++ b/googleapiclient/discovery_cache/documents/iam.v1.json @@ -1484,7 +1484,7 @@ ] }, "disable": { - "description": "Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey. The API is currently in preview phase.", + "description": "Disable a ServiceAccountKey. A disabled service account key can be enabled through EnableServiceAccountKey.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}:disable", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.keys.disable", @@ -1512,7 +1512,7 @@ ] }, "enable": { - "description": "Enable a ServiceAccountKey. 
The API is currently in preview phase.", + "description": "Enable a ServiceAccountKey.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}:enable", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.keys.enable", @@ -1752,7 +1752,7 @@ } } }, - "revision": "20210909", + "revision": "20210918", "rootUrl": "https://iam.googleapis.com/", "schemas": { "AdminAuditData": { diff --git a/googleapiclient/discovery_cache/documents/iamcredentials.v1.json b/googleapiclient/discovery_cache/documents/iamcredentials.v1.json index 14d14943c52..45fe82c1466 100644 --- a/googleapiclient/discovery_cache/documents/iamcredentials.v1.json +++ b/googleapiclient/discovery_cache/documents/iamcredentials.v1.json @@ -226,7 +226,7 @@ } } }, - "revision": "20210910", + "revision": "20210916", "rootUrl": "https://iamcredentials.googleapis.com/", "schemas": { "GenerateAccessTokenRequest": { diff --git a/googleapiclient/discovery_cache/documents/iap.v1.json b/googleapiclient/discovery_cache/documents/iap.v1.json index 93cd65d65c7..80fd8545770 100644 --- a/googleapiclient/discovery_cache/documents/iap.v1.json +++ b/googleapiclient/discovery_cache/documents/iap.v1.json @@ -487,7 +487,7 @@ } } }, - "revision": "20210820", + "revision": "20210924", "rootUrl": "https://iap.googleapis.com/", "schemas": { "AccessDeniedPageSettings": { diff --git a/googleapiclient/discovery_cache/documents/iap.v1beta1.json b/googleapiclient/discovery_cache/documents/iap.v1beta1.json index dcac02e934e..0685cb79b85 100644 --- a/googleapiclient/discovery_cache/documents/iap.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/iap.v1beta1.json @@ -194,7 +194,7 @@ } } }, - "revision": "20210820", + "revision": "20210924", "rootUrl": "https://iap.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json b/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json index 3b001422044..75abd26d4cc 100644 --- a/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/ideahub.v1alpha.json @@ -331,7 +331,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://ideahub.googleapis.com/", "schemas": { "GoogleSearchIdeahubV1alphaAvailableLocale": { diff --git a/googleapiclient/discovery_cache/documents/ideahub.v1beta.json b/googleapiclient/discovery_cache/documents/ideahub.v1beta.json index c815acc9c75..63c0c3ffa2e 100644 --- a/googleapiclient/discovery_cache/documents/ideahub.v1beta.json +++ b/googleapiclient/discovery_cache/documents/ideahub.v1beta.json @@ -288,7 +288,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://ideahub.googleapis.com/", "schemas": { "GoogleSearchIdeahubV1betaAvailableLocale": { diff --git a/googleapiclient/discovery_cache/documents/indexing.v3.json b/googleapiclient/discovery_cache/documents/indexing.v3.json index f553338e7e6..f43fb0bf613 100644 --- a/googleapiclient/discovery_cache/documents/indexing.v3.json +++ b/googleapiclient/discovery_cache/documents/indexing.v3.json @@ -149,7 +149,7 @@ } } }, - "revision": "20210907", + "revision": "20210914", "rootUrl": "https://indexing.googleapis.com/", "schemas": { "PublishUrlNotificationResponse": { diff --git a/googleapiclient/discovery_cache/documents/jobs.v3.json b/googleapiclient/discovery_cache/documents/jobs.v3.json index 04baf10cf7a..d3701cb7ba8 100644 --- a/googleapiclient/discovery_cache/documents/jobs.v3.json +++ 
b/googleapiclient/discovery_cache/documents/jobs.v3.json @@ -651,7 +651,7 @@ } } }, - "revision": "20210830", + "revision": "20210914", "rootUrl": "https://jobs.googleapis.com/", "schemas": { "ApplicationInfo": { diff --git a/googleapiclient/discovery_cache/documents/jobs.v3p1beta1.json b/googleapiclient/discovery_cache/documents/jobs.v3p1beta1.json index 3e2395f024c..447b80a5fbc 100644 --- a/googleapiclient/discovery_cache/documents/jobs.v3p1beta1.json +++ b/googleapiclient/discovery_cache/documents/jobs.v3p1beta1.json @@ -681,7 +681,7 @@ } } }, - "revision": "20210830", + "revision": "20210914", "rootUrl": "https://jobs.googleapis.com/", "schemas": { "ApplicationInfo": { diff --git a/googleapiclient/discovery_cache/documents/jobs.v4.json b/googleapiclient/discovery_cache/documents/jobs.v4.json index 591d1d9818d..34978e0ff32 100644 --- a/googleapiclient/discovery_cache/documents/jobs.v4.json +++ b/googleapiclient/discovery_cache/documents/jobs.v4.json @@ -903,7 +903,7 @@ } } }, - "revision": "20210830", + "revision": "20210914", "rootUrl": "https://jobs.googleapis.com/", "schemas": { "ApplicationInfo": { diff --git a/googleapiclient/discovery_cache/documents/keep.v1.json b/googleapiclient/discovery_cache/documents/keep.v1.json index 1fef707ae7e..0a0ab8ddfde 100644 --- a/googleapiclient/discovery_cache/documents/keep.v1.json +++ b/googleapiclient/discovery_cache/documents/keep.v1.json @@ -314,7 +314,7 @@ } } }, - "revision": "20210920", + "revision": "20210921", "rootUrl": "https://keep.googleapis.com/", "schemas": { "Attachment": { diff --git a/googleapiclient/discovery_cache/documents/kgsearch.v1.json b/googleapiclient/discovery_cache/documents/kgsearch.v1.json index b8bba2f265d..a648b7f7c83 100644 --- a/googleapiclient/discovery_cache/documents/kgsearch.v1.json +++ b/googleapiclient/discovery_cache/documents/kgsearch.v1.json @@ -151,7 +151,7 @@ } } }, - "revision": "20210827", + "revision": "20210918", "rootUrl": "https://kgsearch.googleapis.com/", "schemas": { "SearchResponse": { diff --git a/googleapiclient/discovery_cache/documents/language.v1.json b/googleapiclient/discovery_cache/documents/language.v1.json index 625ab083c2f..e1d87afc31f 100644 --- a/googleapiclient/discovery_cache/documents/language.v1.json +++ b/googleapiclient/discovery_cache/documents/language.v1.json @@ -227,7 +227,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { diff --git a/googleapiclient/discovery_cache/documents/language.v1beta1.json b/googleapiclient/discovery_cache/documents/language.v1beta1.json index c9d0df3b0b3..20abd9e158e 100644 --- a/googleapiclient/discovery_cache/documents/language.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/language.v1beta1.json @@ -189,7 +189,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { diff --git a/googleapiclient/discovery_cache/documents/language.v1beta2.json b/googleapiclient/discovery_cache/documents/language.v1beta2.json index 7695363e0bb..9ca3c9ac466 100644 --- a/googleapiclient/discovery_cache/documents/language.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/language.v1beta2.json @@ -227,7 +227,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { diff --git a/googleapiclient/discovery_cache/documents/libraryagent.v1.json 
b/googleapiclient/discovery_cache/documents/libraryagent.v1.json index ada81fe501a..3ada5e869eb 100644 --- a/googleapiclient/discovery_cache/documents/libraryagent.v1.json +++ b/googleapiclient/discovery_cache/documents/libraryagent.v1.json @@ -279,7 +279,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://libraryagent.googleapis.com/", "schemas": { "GoogleExampleLibraryagentV1Book": { diff --git a/googleapiclient/discovery_cache/documents/licensing.v1.json b/googleapiclient/discovery_cache/documents/licensing.v1.json index 0d891f6b366..7511256b716 100644 --- a/googleapiclient/discovery_cache/documents/licensing.v1.json +++ b/googleapiclient/discovery_cache/documents/licensing.v1.json @@ -400,7 +400,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://licensing.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/localservices.v1.json b/googleapiclient/discovery_cache/documents/localservices.v1.json index 94daf08f110..4852cd289e0 100644 --- a/googleapiclient/discovery_cache/documents/localservices.v1.json +++ b/googleapiclient/discovery_cache/documents/localservices.v1.json @@ -250,7 +250,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://localservices.googleapis.com/", "schemas": { "GoogleAdsHomeservicesLocalservicesV1AccountReport": { diff --git a/googleapiclient/discovery_cache/documents/manufacturers.v1.json b/googleapiclient/discovery_cache/documents/manufacturers.v1.json index f044269dcdd..4ef671ca2c3 100644 --- a/googleapiclient/discovery_cache/documents/manufacturers.v1.json +++ b/googleapiclient/discovery_cache/documents/manufacturers.v1.json @@ -287,7 +287,7 @@ } } }, - "revision": "20210911", + "revision": "20210923", "rootUrl": "https://manufacturers.googleapis.com/", "schemas": { "Attributes": { diff --git a/googleapiclient/discovery_cache/documents/memcache.v1.json b/googleapiclient/discovery_cache/documents/memcache.v1.json index ec0c84b6e46..9e7bfe7fdc4 100644 --- a/googleapiclient/discovery_cache/documents/memcache.v1.json +++ b/googleapiclient/discovery_cache/documents/memcache.v1.json @@ -528,7 +528,7 @@ } } }, - "revision": "20210907", + "revision": "20210917", "rootUrl": "https://memcache.googleapis.com/", "schemas": { "ApplyParametersRequest": { diff --git a/googleapiclient/discovery_cache/documents/memcache.v1beta2.json b/googleapiclient/discovery_cache/documents/memcache.v1beta2.json index 6b020d8c0f2..1f66e93bd4d 100644 --- a/googleapiclient/discovery_cache/documents/memcache.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/memcache.v1beta2.json @@ -556,7 +556,7 @@ } } }, - "revision": "20210907", + "revision": "20210917", "rootUrl": "https://memcache.googleapis.com/", "schemas": { "ApplyParametersRequest": { diff --git a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json index 3b376d88c3b..7983785a576 100644 --- a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json @@ -1170,7 +1170,7 @@ } } }, - "revision": "20210908", + "revision": "20210921", "rootUrl": "https://metastore.googleapis.com/", "schemas": { "AuditConfig": { @@ -2117,7 +2117,7 @@ }, "maintenanceWindow": { "$ref": "MaintenanceWindow", - "description": "The one hour maintenance window of the metastore service. 
This specifies when the service can be restarted for maintenance purposes in UTC time." + "description": "The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type." }, "metadataIntegration": { "$ref": "MetadataIntegration", diff --git a/googleapiclient/discovery_cache/documents/metastore.v1beta.json b/googleapiclient/discovery_cache/documents/metastore.v1beta.json index 768bfdb747c..24db20aab0a 100644 --- a/googleapiclient/discovery_cache/documents/metastore.v1beta.json +++ b/googleapiclient/discovery_cache/documents/metastore.v1beta.json @@ -986,7 +986,7 @@ } } }, - "revision": "20210908", + "revision": "20210921", "rootUrl": "https://metastore.googleapis.com/", "schemas": { "AuditConfig": { @@ -1933,7 +1933,7 @@ }, "maintenanceWindow": { "$ref": "MaintenanceWindow", - "description": "The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time." + "description": "The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type." }, "metadataIntegration": { "$ref": "MetadataIntegration", diff --git a/googleapiclient/discovery_cache/documents/monitoring.v1.json b/googleapiclient/discovery_cache/documents/monitoring.v1.json index a7bb2acc9d4..a5f4a4ad7e9 100644 --- a/googleapiclient/discovery_cache/documents/monitoring.v1.json +++ b/googleapiclient/discovery_cache/documents/monitoring.v1.json @@ -441,7 +441,7 @@ } } }, - "revision": "20210830", + "revision": "20210922", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -1293,6 +1293,44 @@ }, "type": "object" }, + "TableDataSet": { + "description": "Groups a time series query definition with table options.", + "id": "TableDataSet", + "properties": { + "minAlignmentPeriod": { + "description": "Optional. The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes, the min_alignment_period should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.", + "format": "google-duration", + "type": "string" + }, + "tableDisplayOptions": { + "$ref": "TableDisplayOptions", + "description": "Optional. Table display options for configuring how the table is rendered." + }, + "tableTemplate": { + "description": "Optional. A template string for naming TimeSeries in the resulting data set. This should be a string with interpolations of the form ${label_name}, which will resolve to the label's value i.e. \"${resource.labels.project_id}.\"", + "type": "string" + }, + "timeSeriesQuery": { + "$ref": "TimeSeriesQuery", + "description": "Required. Fields for querying time series data from the Stackdriver metrics API." + } + }, + "type": "object" + }, + "TableDisplayOptions": { + "description": "Table display options that can be reused.", + "id": "TableDisplayOptions", + "properties": { + "shownColumns": { + "description": "Optional. Columns to display in the table. Leave empty to display all available columns.
Note: This field is for future features and is not currently used.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "Text": { "description": "A widget that displays textual content.", "id": "Text", @@ -1484,6 +1522,20 @@ }, "type": "object" }, + "TimeSeriesTable": { + "description": "A table that displays time series data.", + "id": "TimeSeriesTable", + "properties": { + "dataSets": { + "description": "Required. The data displayed in this table.", + "items": { + "$ref": "TableDataSet" + }, + "type": "array" + } + }, + "type": "object" + }, "Type": { "description": "A protocol buffer message type.", "id": "Type", @@ -1552,6 +1604,10 @@ "$ref": "Text", "description": "A raw string or markdown displaying textual content." }, + "timeSeriesTable": { + "$ref": "TimeSeriesTable", + "description": "A widget that displays time series data in a tabular format." + }, "title": { "description": "Optional. The title of the widget.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/monitoring.v3.json b/googleapiclient/discovery_cache/documents/monitoring.v3.json index 8b3e3de819a..d8f6ae5e11a 100644 --- a/googleapiclient/discovery_cache/documents/monitoring.v3.json +++ b/googleapiclient/discovery_cache/documents/monitoring.v3.json @@ -2541,7 +2541,7 @@ } } }, - "revision": "20210830", + "revision": "20210922", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { diff --git a/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json b/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json index 166edf3dea8..e79780a9d44 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json @@ -530,7 +530,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://mybusinessaccountmanagement.googleapis.com/", "schemas": { "AcceptInvitationRequest": { diff --git a/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json b/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json index 0a19b4e0708..a74bc281dda 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json @@ -662,7 +662,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://mybusinessbusinessinformation.googleapis.com/", "schemas": { "AdWordsLocationExtensions": { diff --git a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json index fb5265be178..78a1f541228 100644 --- a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json @@ -194,7 +194,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://mybusinesslodging.googleapis.com/", "schemas": { "Accessibility": { diff --git a/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json b/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json index 01adf7e9b0c..b57f0ffc010 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json @@ -154,7 +154,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": 
"https://mybusinessnotifications.googleapis.com/", "schemas": { "NotificationSetting": { diff --git a/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json b/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json index d616631e266..0dd51dab2aa 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json @@ -281,7 +281,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://mybusinessplaceactions.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json b/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json index f2746fd4cdf..6163af55110 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json @@ -256,7 +256,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://mybusinessverifications.googleapis.com/", "schemas": { "AddressVerificationData": { diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json index af491488f09..04a9cbf9876 100644 --- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json +++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json @@ -339,7 +339,7 @@ } } }, - "revision": "20210911", + "revision": "20210918", "rootUrl": "https://ondemandscanning.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json index 62b221d54d9..ba43f6bd5e2 100644 --- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json @@ -339,7 +339,7 @@ } } }, - "revision": "20210911", + "revision": "20210918", "rootUrl": "https://ondemandscanning.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/orgpolicy.v2.json b/googleapiclient/discovery_cache/documents/orgpolicy.v2.json index 40e6991cb90..79f695eb3cb 100644 --- a/googleapiclient/discovery_cache/documents/orgpolicy.v2.json +++ b/googleapiclient/discovery_cache/documents/orgpolicy.v2.json @@ -751,7 +751,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://orgpolicy.googleapis.com/", "schemas": { "GoogleCloudOrgpolicyV2Constraint": { diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1.json b/googleapiclient/discovery_cache/documents/osconfig.v1.json index 56343fdfa09..64c6958c41d 100644 --- a/googleapiclient/discovery_cache/documents/osconfig.v1.json +++ b/googleapiclient/discovery_cache/documents/osconfig.v1.json @@ -777,7 +777,7 @@ } } }, - "revision": "20210914", + "revision": "20210918", "rootUrl": "https://osconfig.googleapis.com/", "schemas": { "AptSettings": { diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json b/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json index 5a24e990b39..e35f96efc17 100644 --- a/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/osconfig.v1alpha.json @@ -686,7 +686,7 @@ } } }, - "revision": "20210914", + "revision": "20210918", "rootUrl": 
"https://osconfig.googleapis.com/", "schemas": { "CVSSv3": { diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1beta.json b/googleapiclient/discovery_cache/documents/osconfig.v1beta.json index e6ea63a9cc3..68b3904d0e2 100644 --- a/googleapiclient/discovery_cache/documents/osconfig.v1beta.json +++ b/googleapiclient/discovery_cache/documents/osconfig.v1beta.json @@ -599,7 +599,7 @@ } } }, - "revision": "20210914", + "revision": "20210918", "rootUrl": "https://osconfig.googleapis.com/", "schemas": { "AptRepository": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1.json b/googleapiclient/discovery_cache/documents/oslogin.v1.json index b50d8baa858..f6028670691 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1.json @@ -5,8 +5,14 @@ "https://www.googleapis.com/auth/cloud-platform": { "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account." }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud services and see the email address of your Google Account" + }, "https://www.googleapis.com/auth/compute": { "description": "View and manage your Google Compute Engine resources" + }, + "https://www.googleapis.com/auth/compute.readonly": { + "description": "View your Google Compute Engine resources" } } } @@ -143,7 +149,9 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "importSshPublicKey": { @@ -306,7 +314,7 @@ } } }, - "revision": "20210910", + "revision": "20210920", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json index a1240c311df..94b1685ef4e 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json @@ -374,7 +374,7 @@ } } }, - "revision": "20210910", + "revision": "20210920", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json index f2c5715a87a..cd643e25a42 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json @@ -344,7 +344,7 @@ } } }, - "revision": "20210910", + "revision": "20210920", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json b/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json index 834029cc399..695da815405 100644 --- a/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json +++ b/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json @@ -193,7 +193,7 @@ } } }, - "revision": "20210917", + "revision": "20210924", "rootUrl": "https://pagespeedonline.googleapis.com/", "schemas": { "AuditRefs": { diff --git a/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json b/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json index 794aae1198a..ccacee39514 100644 --- 
a/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json +++ b/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json @@ -366,7 +366,7 @@ } } }, - "revision": "20210920", + "revision": "20210927", "rootUrl": "https://paymentsresellersubscription.googleapis.com/", "schemas": { "GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionRequest": { @@ -386,6 +386,7 @@ "CANCELLATION_REASON_PAST_DUE", "CANCELLATION_REASON_ACCOUNT_CLOSED", "CANCELLATION_REASON_UPGRADE_DOWNGRADE", + "CANCELLATION_REASON_USER_DELINQUENCY", "CANCELLATION_REASON_OTHER" ], "enumDescriptions": [ @@ -396,6 +397,7 @@ "Payment is past due.", "User account closed.", "Used for notification only, do not use in Cancel API. Cancellation due to upgrade or downgrade.", + "Cancellation due to user delinquency.", "Other reason." ], "type": "string" @@ -761,6 +763,7 @@ "CANCELLATION_REASON_PAST_DUE", "CANCELLATION_REASON_ACCOUNT_CLOSED", "CANCELLATION_REASON_UPGRADE_DOWNGRADE", + "CANCELLATION_REASON_USER_DELINQUENCY", "CANCELLATION_REASON_OTHER" ], "enumDescriptions": [ @@ -771,6 +774,7 @@ "Payment is past due.", "User account closed.", "Used for notification only, do not use in Cancel API. Cancellation due to upgrade or downgrade.", + "Cancellation due to user delinquency.", "Other reason." ], "type": "string" diff --git a/googleapiclient/discovery_cache/documents/people.v1.json b/googleapiclient/discovery_cache/documents/people.v1.json index 2b36c0b629c..092fcf2a3bf 100644 --- a/googleapiclient/discovery_cache/documents/people.v1.json +++ b/googleapiclient/discovery_cache/documents/people.v1.json @@ -1172,7 +1172,7 @@ } } }, - "revision": "20210916", + "revision": "20210923", "rootUrl": "https://people.googleapis.com/", "schemas": { "Address": { diff --git a/googleapiclient/discovery_cache/documents/playablelocations.v3.json b/googleapiclient/discovery_cache/documents/playablelocations.v3.json index 27232a08763..3fc3edaed00 100644 --- a/googleapiclient/discovery_cache/documents/playablelocations.v3.json +++ b/googleapiclient/discovery_cache/documents/playablelocations.v3.json @@ -146,7 +146,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://playablelocations.googleapis.com/", "schemas": { "GoogleMapsPlayablelocationsV3Impression": { diff --git a/googleapiclient/discovery_cache/documents/playcustomapp.v1.json b/googleapiclient/discovery_cache/documents/playcustomapp.v1.json index 56ad029396a..6332227409b 100644 --- a/googleapiclient/discovery_cache/documents/playcustomapp.v1.json +++ b/googleapiclient/discovery_cache/documents/playcustomapp.v1.json @@ -158,7 +158,7 @@ } } }, - "revision": "20210918", + "revision": "20210921", "rootUrl": "https://playcustomapp.googleapis.com/", "schemas": { "CustomApp": { diff --git a/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json b/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json index f0157211e01..ce364721f56 100644 --- a/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json +++ b/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json @@ -123,7 +123,7 @@ ], "parameters": { "filter": { - "description": "Optional. Filter expression to restrict the activities returned. Supported filters are: - service_account_last_authn.full_resource_name {=} [STRING] - service_account_key_last_authn.full_resource_name {=} [STRING]", + "description": "Optional. Filter expression to restrict the activities returned.
diff --git a/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json b/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json index f0157211e01..ce364721f56 100644 --- a/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json +++ b/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json @@ -123,7 +123,7 @@ ], "parameters": { "filter": { - "description": "Optional. Filter expression to restrict the activities returned. Supported filters are: - service_account_last_authn.full_resource_name {=} [STRING] - service_account_key_last_authn.full_resource_name {=} [STRING]", + "description": "Optional. Filter expression to restrict the activities returned. For serviceAccountLastAuthentication activities, supported filters are: - `activities.full_resource_name {=} [STRING]` - `activities.fullResourceName {=} [STRING]` where `[STRING]` is the full resource name of the service account. For serviceAccountKeyLastAuthentication activities, supported filters are: - `activities.full_resource_name {=} [STRING]` - `activities.fullResourceName {=} [STRING]` where `[STRING]` is the full resource name of the service account key.", "location": "query", "type": "string" }, @@ -163,7 +163,7 @@ } } }, - "revision": "20210906", + "revision": "20210918", "rootUrl": "https://policyanalyzer.googleapis.com/", "schemas": { "GoogleCloudPolicyanalyzerV1Activity": { diff --git a/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json b/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json index 4c87892adac..7be8772e423 100644 --- a/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json @@ -163,7 +163,7 @@ } } }, - "revision": "20210906", + "revision": "20210918", "rootUrl": "https://policyanalyzer.googleapis.com/", "schemas": { "GoogleCloudPolicyanalyzerV1beta1Activity": { diff --git a/googleapiclient/discovery_cache/documents/policysimulator.v1.json b/googleapiclient/discovery_cache/documents/policysimulator.v1.json index b9562cc4636..8f48abd85cd 100644 --- a/googleapiclient/discovery_cache/documents/policysimulator.v1.json +++ b/googleapiclient/discovery_cache/documents/policysimulator.v1.json @@ -493,7 +493,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://policysimulator.googleapis.com/", "schemas": { "GoogleCloudPolicysimulatorV1AccessStateDiff": { diff --git a/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json b/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json index e8295e65c23..816fc7d4fa2 100644 --- a/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/policysimulator.v1beta1.json @@ -493,7 +493,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://policysimulator.googleapis.com/", "schemas": { "GoogleCloudPolicysimulatorV1Replay": { diff --git a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json index bf0d7232a01..9907d1be951 100644 --- a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json +++ b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json @@ -128,7 +128,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://policytroubleshooter.googleapis.com/", "schemas": { "GoogleCloudPolicytroubleshooterV1AccessTuple": { diff --git a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json index 86fc591f5d7..00888c8db64 100644 --- a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json +++ b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json @@ -128,7 +128,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://policytroubleshooter.googleapis.com/", "schemas": { "GoogleCloudPolicytroubleshooterV1betaAccessTuple": {
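The policyanalyzer.v1 hunk above rewrites the `filter` description to spell out the supported forms per activity type. A minimal sketch of querying serviceAccountLastAuthentication activities with one of the documented filters; the project and service-account resource name are placeholders:

```
# Hedged sketch: query activities using a documented filter form.
from googleapiclient.discovery import build

service = build("policyanalyzer", "v1")  # default credentials assumed
parent = ("projects/example-project/locations/global/"
          "activityTypes/serviceAccountLastAuthentication")
sa_name = ("//iam.googleapis.com/projects/example-project/serviceAccounts/"
           "my-sa@example-project.iam.gserviceaccount.com")
response = service.projects().locations().activityTypes().activities().query(
    parent=parent,
    filter='activities.full_resource_name = "%s"' % sa_name,
).execute()
for activity in response.get("activities", []):
    print(activity.get("fullResourceName"))
```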
diff --git a/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json b/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json index 6c8b9975f27..1684ec1ce5f 100644 --- a/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json @@ -2484,7 +2484,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://prod-tt-sasportal.googleapis.com/", "schemas": { "SasPortalAssignment": { diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1.json b/googleapiclient/discovery_cache/documents/pubsub.v1.json index e068a742db7..bf85e3410a1 100644 --- a/googleapiclient/discovery_cache/documents/pubsub.v1.json +++ b/googleapiclient/discovery_cache/documents/pubsub.v1.json @@ -1424,7 +1424,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json b/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json index 29a6617963b..5e6004b2558 100644 --- a/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json +++ b/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json @@ -457,7 +457,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json b/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json index 33f146ba2a3..d060bee1cb9 100644 --- a/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json @@ -724,7 +724,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { diff --git a/googleapiclient/discovery_cache/documents/pubsublite.v1.json b/googleapiclient/discovery_cache/documents/pubsublite.v1.json index 4509b6977ef..124fd464bae 100644 --- a/googleapiclient/discovery_cache/documents/pubsublite.v1.json +++ b/googleapiclient/discovery_cache/documents/pubsublite.v1.json @@ -1040,7 +1040,7 @@ } } }, - "revision": "20210913", + "revision": "20210920", "rootUrl": "https://pubsublite.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json index 190b2e8e4b1..6a961b3abd1 100644 --- a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json +++ b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json @@ -1140,7 +1140,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://realtimebidding.googleapis.com/", "schemas": { "ActivatePretargetingConfigRequest": { diff --git a/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json b/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json index 5a8878f0d90..3b898a808b2 100644 --- a/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/realtimebidding.v1alpha.json @@ -234,7 +234,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://realtimebidding.googleapis.com/", "schemas": { "ActivateBiddingFunctionRequest": { @@ -286,7 +286,7 @@ "enumDescriptions": [ "Default value that should not be used.", "Bidding function that can be used by Authorized Buyers in the original TURTLEDOVE
simulation. See documentation on the TURTLEDOVE simulation at https://developers.google.com/authorized-buyers/rtb/turtledove. The function takes in a Javascript object, `inputs`, that contains the following named fields: `openrtbContextualBidRequest` OR `googleContextualBidRequest`, `customContextualSignal`, `interestBasedBidData`, `interestGroupData`, and returns the bid price CPM. Example: ``` /* Returns a bid price CPM. * * @param {Object} inputs an object with the * following named fields: * - openrtbContextualBidRequest * OR googleContextualBidRequest * - customContextualSignal * - interestBasedBidData * - interestGroupData */ function biddingFunction(inputs) { ... return inputs.interestBasedBidData.cpm * inputs.customContextualSignals.placementMultiplier; } ```", - "Buyer's interest group bidding function that can be used by Authorized Buyers in the FLEDGE simulation. See the FLEDGE explainer at https://github.com/WICG/turtledove/blob/main/FLEDGE.md#32-on-device-bidding. The function takes one argument, `inputs`, that contains an object with the following named fields of the form: ``` { \"interestGroup\" : { \"ad\" : [ \"buyerCreativeId\": \"...\", # Ad creative ID \"adData\": { # JSON object } ], \"userBiddingSignals\": { . # JSON object } }, \"auctionSignals\": { \"url\": # string, \"slotVisibility\": # enum value, \"slotDimensions\": [ { \"height\": # number value \"width\": # number value } ] }, \"perBuyerSignals\": { # JSON object }, \"trustedBiddingSignals\": { # JSON object }, \"browserSignals\": { \"recent_impression_ages_secs\": [ # Array of integers. Not yet populated. ] } } ``` `interestGroup`: An object containing a list of `ad` objects, which contain the following named fields: - `buyerCreativeId`: The ad creative ID string. - `adData`: Any JSON value of the bidder's choosing to contain data associated with an ad provided in `BidResponse.ad.adslot.ad_data` for the Google Authorized Buyers protocol and `BidResponse.seatbid.bid.ext.ad_data` for the OpenRTB protocol. - `userBiddingSignals`: Any JSON value of the bidder's choosing containing interest group data that corresponds to user_bidding_signals (as in FLEDGE). This field will be populated from `BidResponse.interest_group_map.user_bidding_signals` for Google Authorized Buyers protocol and `BidResponse.ext.interest_group_map.user_bidding_signals` for the OpenRTB protocol. `auctionSignals`: Contains data from the seller. It corresponds to the auction signals data described in the FLEDGE proposal. It is an object containing the following named fields: - `url`: The string URL of the page with parameters removed. - `slotVisibility`: Enum of one of the following potential values: - NO_DETECTION = 0 - ABOVE_THE_FOLD = 1 - BELOW_THE_FOLD = 2 - `slotDimensions`: A list of objects containing containing width and height pairs in `width` and `height` fields, respectively, from `BidRequest.adslot.width` and `BidRequest.adslot.height` for the Google Authorized Buyers protocol and `BidRequest.imp.banner.format.w` and `BidRequest.imp.banner.format.h` for the OpenRTB protocol. `perBuyerSignals`: The contextual signals from the bid response that are populated in `BidResponse.interest_group_bidding.interest_group_buyers.per_buyer_signals` for the Google Authorized Buyers protocol and `BidResponse.ext.interest_group_bidding.interest_group_buyers.per_buyer_signals` for the OpenRTB protocol. 
These signals can be of any JSON format of your choosing, however, the buyer's domain name must match between: - the interest group response in `BidResponse.interest_group_map.buyer_domain` for the Google Authorized Buyers protocol or in `BidResponse.ext.interest_group_map.buyer_domain` for the OpenRTB protocol. - the contextual response as a key to the map in `BidResponse.interest_group_bidding.interest_group_buyers` for the Google Authorized Buyers protocol or in `BidResponse.ext.interest_group_bidding.interest_group_buyers` for the OpenRTB protocol. In other words, there must be a match between the buyer domain of the contextual per_buyer_signals and the domain of an interest group. `trustedBiddingSignals`: The trusted bidding signals that corresponds to the trusted_bidding_signals in the FLEDGE proposal. It is provided in the interest group response as `BidResponse.interest_group_map.user_bidding_signals` for the Google Authorized Buyers protocol and `BidResponse.ext.interest_group_map.user_bidding_signals` for the OpenRTB protocol. This field can be any JSON format of your choosing. `browserSignals`: An object of simulated browser-provider signals. It is an object with a single named field, `recent_impression_ages_secs`, that contains a list of estimated number value recent impression ages in seconds for a given interest group. `recent_impression_ages_secs` is not yet populated. The function returns the string creative ID of the selected ad, the bid price CPM, and (optionally) selected product IDs. In addition, the bidding function may populate an optional string debug token that may be useful for remote debugging of a bidding function performing unexpectedly. This debug string is available in `BidResponseFeedback` (https://developers.google.com/authorized-buyers/rtb/realtime-bidding-guide#bidresponsefeedback-object) and BidFeedback (https://developers.google.com/authorized-buyers/rtb/openrtb-guide#bidfeedback), for the Google protocol and openRTB protocol respectively. Example: ``` function biddingFunction(inputs) { ... return { \"buyerCreativeId\": \"ad_creative_id_1\", \"bidPriceCpm\": 0.3, \"productIds\": [\"product_id_1\", \"product_id_2\", \"product_id_3\"] \"debugString\": \"Bidding function executed successfully!\" } } ```" + "Buyer's interest group bidding function that can be used by Authorized Buyers in the FLEDGE simulation. See the FLEDGE explainer at https://github.com/WICG/turtledove/blob/main/FLEDGE.md#32-on-device-bidding. The function takes one argument, `inputs`, that contains an object with the following named fields of the form: ``` { \"interestGroup\" : { \"ad\" : [ \"buyerCreativeId\": \"...\", # Ad creative ID \"adData\": { # JSON object } ], \"userBiddingSignals\": { . # JSON object } }, \"auctionSignals\": { \"url\": # string, \"slotVisibility\": # enum value, \"slotDimensions\": [ { \"height\": # number value \"width\": # number value } ] }, \"perBuyerSignals\": { # JSON object }, \"trustedBiddingSignals\": { # JSON object }, \"browserSignals\": { \"recent_impression_ages_secs\": [ # Array of integers. Not yet populated. ] } } ``` `interestGroup`: An object containing a list of `ad` objects, which contain the following named fields: - `buyerCreativeId`: The ad creative ID string. - `adData`: Any JSON value of the bidder's choosing to contain data associated with an ad provided in `BidResponse.ad.adslot.ad_data` for the Google Authorized Buyers protocol and `BidResponse.seatbid.bid.ext.ad_data` for the OpenRTB protocol. 
- `userBiddingSignals`: Any JSON value of the bidder's choosing containing interest group data that corresponds to user_bidding_signals (as in FLEDGE). This field will be populated from `BidResponse.interest_group_map.user_bidding_signals` for Google Authorized Buyers protocol and `BidResponse.ext.interest_group_map.user_bidding_signals` for the OpenRTB protocol. `auctionSignals`: Contains data from the seller. It corresponds to the auction signals data described in the FLEDGE proposal. It is an object containing the following named fields: - `url`: The string URL of the page with parameters removed. - `slotVisibility`: Enum of one of the following potential values: - NO_DETECTION = 0 - ABOVE_THE_FOLD = 1 - BELOW_THE_FOLD = 2 - `slotDimensions`: A list of objects containing width and height pairs in `width` and `height` fields, respectively, from `BidRequest.adslot.width` and `BidRequest.adslot.height` for the Google Authorized Buyers protocol and `BidRequest.imp.banner.format.w` and `BidRequest.imp.banner.format.h` for the OpenRTB protocol. `perBuyerSignals`: The contextual signals from the bid response that are populated in `BidResponse.interest_group_bidding.interest_group_buyers.per_buyer_signals` for the Google Authorized Buyers protocol and `BidResponse.ext.interest_group_bidding.interest_group_buyers.per_buyer_signals` for the OpenRTB protocol. These signals can be of any JSON format of your choosing; however, the buyer's domain name must match between: - the interest group response in `BidResponse.interest_group_map.buyer_domain` for the Google Authorized Buyers protocol or in `BidResponse.ext.interest_group_map.buyer_domain` for the OpenRTB protocol. - the contextual response as a key to the map in `BidResponse.interest_group_bidding.interest_group_buyers` for the Google Authorized Buyers protocol or in `BidResponse.ext.interest_group_bidding.interest_group_buyers` for the OpenRTB protocol. In other words, there must be a match between the buyer domain of the contextual per_buyer_signals and the domain of an interest group. `trustedBiddingSignals`: The trusted bidding signals that correspond to the trusted_bidding_signals in the FLEDGE proposal. It is provided in the interest group response as `BidResponse.interest_group_map.user_bidding_signals` for the Google Authorized Buyers protocol and `BidResponse.ext.interest_group_map.user_bidding_signals` for the OpenRTB protocol. This field can be any JSON format of your choosing. `browserSignals`: An object of simulated browser-provided signals. It is an object with a single named field, `recent_impression_ages_secs`, that contains a list of estimated recent impression ages (number values) in seconds for a given interest group. `recent_impression_ages_secs` is not yet populated. The function returns the string creative ID of the selected ad, the bid price CPM, and (optionally) selected product IDs. In addition, the bidding function may populate an optional debug string that may be used for remote debugging and troubleshooting of a bidder-provided bidding function. The debug string should not contain a user identifier. The maximum length of the debug string is 200 bytes. This debug string is available in `BidResponseFeedback` (https://developers.google.com/authorized-buyers/rtb/realtime-bidding-guide#bidresponsefeedback-object) and `BidFeedback` (https://developers.google.com/authorized-buyers/rtb/openrtb-guide#bidfeedback), for the Google protocol and OpenRTB protocol respectively.
In addition, the debug string can be inserted into the creative HTML snippet via macro substitution if the following string is included in the snippet: \u201c%%DEBUG_STRING%%\u201d. Please ensure the debug string complies with [Platform Program Policies](https://support.google.com/platformspolicy/answer/3013851). Sample Bidding Function: ``` function biddingFunction(inputs) { ... return { \"buyerCreativeId\": \"ad_creative_id_1\", \"bidPriceCpm\": 0.3, \"productIds\": [\"product_id_1\", \"product_id_2\", \"product_id_3\"], \"debugString\": \"Bidding function executed successfully!\" } } ```" ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json b/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json index 429bdfea353..13544e2ea2b 100644 --- a/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json +++ b/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json @@ -375,7 +375,7 @@ } } }, - "revision": "20210910", + "revision": "20210924", "rootUrl": "https://recaptchaenterprise.googleapis.com/", "schemas": { "GoogleCloudRecaptchaenterpriseV1AndroidKeySettings": { diff --git a/googleapiclient/discovery_cache/documents/recommender.v1.json b/googleapiclient/discovery_cache/documents/recommender.v1.json index 3bee0ea94c3..78743aa2bb1 100644 --- a/googleapiclient/discovery_cache/documents/recommender.v1.json +++ b/googleapiclient/discovery_cache/documents/recommender.v1.json @@ -1178,7 +1178,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://recommender.googleapis.com/", "schemas": { "GoogleCloudRecommenderV1CostProjection": { diff --git a/googleapiclient/discovery_cache/documents/recommender.v1beta1.json b/googleapiclient/discovery_cache/documents/recommender.v1beta1.json index 34f99eb1d9a..4eab729f597 100644 --- a/googleapiclient/discovery_cache/documents/recommender.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/recommender.v1beta1.json @@ -1178,7 +1178,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://recommender.googleapis.com/", "schemas": { "GoogleCloudRecommenderV1beta1CostProjection": { diff --git a/googleapiclient/discovery_cache/documents/reseller.v1.json b/googleapiclient/discovery_cache/documents/reseller.v1.json index b922aa1ede9..ec1c6331417 100644 --- a/googleapiclient/discovery_cache/documents/reseller.v1.json +++ b/googleapiclient/discovery_cache/documents/reseller.v1.json @@ -631,7 +631,7 @@ } } }, - "revision": "20210913", + "revision": "20210921", "rootUrl": "https://reseller.googleapis.com/", "schemas": { "Address": { diff --git a/googleapiclient/discovery_cache/documents/resourcesettings.v1.json b/googleapiclient/discovery_cache/documents/resourcesettings.v1.json index 19defd9fb84..166977e38e0 100644 --- a/googleapiclient/discovery_cache/documents/resourcesettings.v1.json +++ b/googleapiclient/discovery_cache/documents/resourcesettings.v1.json @@ -499,7 +499,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://resourcesettings.googleapis.com/", "schemas": { "GoogleCloudResourcesettingsV1ListSettingsResponse": { diff --git a/googleapiclient/discovery_cache/documents/retail.v2.json b/googleapiclient/discovery_cache/documents/retail.v2.json index 437c36b2850..d7c76af8920 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2.json +++ b/googleapiclient/discovery_cache/documents/retail.v2.json @@ -527,7 +527,7 @@ "type": "boolean" }, "name": { - "description":
"Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".", + "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$", "required": true, @@ -589,7 +589,7 @@ ], "parameters": { "name": { - "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".", + "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$", "required": true, @@ -1007,7 +1007,7 @@ } } }, - "revision": "20210909", + "revision": "20210924", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -1282,7 +1282,7 @@ "type": "array" }, "colors": { - "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).", + "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).", "items": { "type": "string" }, @@ -1981,7 +1981,7 @@ "type": "array" }, "name": { - "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".", + "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.", "type": "string" }, "patterns": { @@ -2924,6 +2924,12 @@ "properties": {}, "type": "object" }, + "GoogleCloudRetailV2alphaEnrollSolutionMetadata": { + "description": "Metadata related to the EnrollSolution method. 
This will be returned by the google.longrunning.Operation.metadata field.", + "id": "GoogleCloudRetailV2alphaEnrollSolutionMetadata", + "properties": {}, + "type": "object" + }, "GoogleCloudRetailV2alphaExportErrorsConfig": { "description": "Configuration of destination for Export related errors.", "id": "GoogleCloudRetailV2alphaExportErrorsConfig", diff --git a/googleapiclient/discovery_cache/documents/retail.v2alpha.json b/googleapiclient/discovery_cache/documents/retail.v2alpha.json index a118f6bcbec..bd9a074b5ca 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/retail.v2alpha.json @@ -532,7 +532,7 @@ "type": "boolean" }, "name": { - "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".", + "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$", "required": true, @@ -594,7 +594,7 @@ ], "parameters": { "name": { - "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".", + "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$", "required": true, @@ -1012,7 +1012,7 @@ } } }, - "revision": "20210909", + "revision": "20210924", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -1480,7 +1480,7 @@ "type": "array" }, "colors": { - "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).", + "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).", "items": { "type": "string" }, @@ -1604,6 +1604,12 @@ }, "type": "object" }, + "GoogleCloudRetailV2alphaEnrollSolutionMetadata": { + "description": "Metadata related to the EnrollSolution method. 
This will be returned by the google.longrunning.Operation.metadata field.", + "id": "GoogleCloudRetailV2alphaEnrollSolutionMetadata", + "properties": {}, + "type": "object" + }, "GoogleCloudRetailV2alphaExportErrorsConfig": { "description": "Configuration of destination for Export related errors.", "id": "GoogleCloudRetailV2alphaExportErrorsConfig", @@ -2285,7 +2291,7 @@ "type": "array" }, "name": { - "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".", + "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.", "type": "string" }, "patterns": { diff --git a/googleapiclient/discovery_cache/documents/retail.v2beta.json b/googleapiclient/discovery_cache/documents/retail.v2beta.json index 7a3594b456e..2d5f4fdc8cb 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2beta.json +++ b/googleapiclient/discovery_cache/documents/retail.v2beta.json @@ -527,7 +527,7 @@ "type": "boolean" }, "name": { - "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".", + "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$", "required": true, @@ -589,7 +589,7 @@ ], "parameters": { "name": { - "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".", + "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/branches/[^/]+/products/.*$", "required": true, @@ -1007,7 +1007,7 @@ } } }, - "revision": "20210909", + "revision": "20210924", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -1361,6 +1361,12 @@ "properties": {}, "type": "object" }, + "GoogleCloudRetailV2alphaEnrollSolutionMetadata": { + "description": "Metadata related to the EnrollSolution method. This will be returned by the google.longrunning.Operation.metadata field.", + "id": "GoogleCloudRetailV2alphaEnrollSolutionMetadata", + "properties": {}, + "type": "object" + }, "GoogleCloudRetailV2alphaExportErrorsConfig": { "description": "Configuration of destination for Export related errors.", "id": "GoogleCloudRetailV2alphaExportErrorsConfig", @@ -1724,7 +1730,7 @@ "type": "array" }, "colors": { - "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 5 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). 
Schema.org property [Product.color](https://schema.org/color).", + "description": "The color display names, which may be different from standard color family names, such as the color aliases used in the website frontend. Normally it is expected to have only 1 color. May consider using single \"Mixed\" instead of multiple values. A maximum of 25 colors are allowed. Each value must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. Google Merchant Center property [color](https://support.google.com/merchants/answer/6324487). Schema.org property [Product.color](https://schema.org/color).", "items": { "type": "string" }, @@ -2487,7 +2493,7 @@ "type": "array" }, "name": { - "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`. The branch ID must be \"default_branch\".", + "description": "Immutable. Full resource name of the product, such as `projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id`.", "type": "string" }, "patterns": { diff --git a/googleapiclient/discovery_cache/documents/run.v1.json b/googleapiclient/discovery_cache/documents/run.v1.json index a6c67e97b15..85171ff19a2 100644 --- a/googleapiclient/discovery_cache/documents/run.v1.json +++ b/googleapiclient/discovery_cache/documents/run.v1.json @@ -1736,7 +1736,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://run.googleapis.com/", "schemas": { "Addressable": { diff --git a/googleapiclient/discovery_cache/documents/run.v1alpha1.json b/googleapiclient/discovery_cache/documents/run.v1alpha1.json index e897cde5cb3..4e43bbb6680 100644 --- a/googleapiclient/discovery_cache/documents/run.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/run.v1alpha1.json @@ -268,7 +268,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://run.googleapis.com/", "schemas": { "ConfigMapEnvSource": { diff --git a/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json b/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json index d010e02a188..5d9d6914218 100644 --- a/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json +++ b/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json @@ -210,7 +210,7 @@ } } }, - "revision": "20210910", + "revision": "20210920", "rootUrl": "https://runtimeconfig.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/runtimeconfig.v1beta1.json b/googleapiclient/discovery_cache/documents/runtimeconfig.v1beta1.json index 06f593b6afc..6ecc0c5f7e1 100644 --- a/googleapiclient/discovery_cache/documents/runtimeconfig.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/runtimeconfig.v1beta1.json @@ -805,7 +805,7 @@ } } }, - "revision": "20210910", + "revision": "20210925", "rootUrl": "https://runtimeconfig.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/safebrowsing.v4.json b/googleapiclient/discovery_cache/documents/safebrowsing.v4.json index 4295ff74115..9d9514b4563 100644 --- a/googleapiclient/discovery_cache/documents/safebrowsing.v4.json +++ b/googleapiclient/discovery_cache/documents/safebrowsing.v4.json @@ -261,7 +261,7 @@ } } }, - "revision": "20210914", + "revision": "20210924", "rootUrl": "https://safebrowsing.googleapis.com/", "schemas": { "GoogleProtobufEmpty": {
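The retail.v2, v2alpha, and v2beta hunks above drop the "branch ID must be \"default_branch\"" sentence from the product `name` descriptions and raise the `colors` limit from 5 to 25. A minimal sketch of fetching a product by full resource name on a non-default branch, which the relaxed description now permits; all IDs are placeholders:

```
# Hedged sketch: get a product whose branch segment is not "default_branch".
from googleapiclient.discovery import build

retail = build("retail", "v2")  # default credentials assumed
name = ("projects/example-project/locations/global/catalogs/default_catalog/"
        "branches/0/products/example-product-id")
product = (retail.projects().locations().catalogs().branches().products()
           .get(name=name).execute())
print(product.get("title"))
```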
diff --git a/googleapiclient/discovery_cache/documents/secretmanager.v1.json b/googleapiclient/discovery_cache/documents/secretmanager.v1.json index 5a705c0cc23..3f512f58f00 100644 --- a/googleapiclient/discovery_cache/documents/secretmanager.v1.json +++ b/googleapiclient/discovery_cache/documents/secretmanager.v1.json @@ -643,7 +643,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://secretmanager.googleapis.com/", "schemas": { "AccessSecretVersionResponse": { diff --git a/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json b/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json index 712a5ac1ee1..55ffbc70b40 100644 --- a/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json @@ -628,7 +628,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://secretmanager.googleapis.com/", "schemas": { "AccessSecretVersionResponse": { diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1.json index 6fe3c6eaded..86124a26c9a 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1.json @@ -1816,7 +1816,7 @@ } } }, - "revision": "20210917", + "revision": "20210923", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Asset": { diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json index b0b7b0e298c..dd78dd6b4c3 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json @@ -896,7 +896,7 @@ } } }, - "revision": "20210917", + "revision": "20210923", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Asset": { diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json index 1dc6e4c11d5..8a67a432e71 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json @@ -1328,7 +1328,7 @@ } } }, - "revision": "20210917", + "revision": "20210923", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Config": { diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json index 115d47d67c4..cf84bebb639 100644 --- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json @@ -542,7 +542,7 @@ } } }, - "revision": "20210910", + "revision": "20210924", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "AddTenantProjectRequest": { diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json index ace23484080..f1c04f9c26f 100644 --- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json @@ -500,7 +500,7 @@ } } }, - "revision": "20210910", + "revision": "20210924", "rootUrl":
"https://serviceconsumermanagement.googleapis.com/", "schemas": { "Api": { diff --git a/googleapiclient/discovery_cache/documents/servicecontrol.v1.json b/googleapiclient/discovery_cache/documents/servicecontrol.v1.json index 8c8ed08ea24..c19946010dd 100644 --- a/googleapiclient/discovery_cache/documents/servicecontrol.v1.json +++ b/googleapiclient/discovery_cache/documents/servicecontrol.v1.json @@ -197,7 +197,7 @@ } } }, - "revision": "20210910", + "revision": "20210916", "rootUrl": "https://servicecontrol.googleapis.com/", "schemas": { "AllocateInfo": { diff --git a/googleapiclient/discovery_cache/documents/servicecontrol.v2.json b/googleapiclient/discovery_cache/documents/servicecontrol.v2.json index ff0de412a76..0c584cd0601 100644 --- a/googleapiclient/discovery_cache/documents/servicecontrol.v2.json +++ b/googleapiclient/discovery_cache/documents/servicecontrol.v2.json @@ -169,7 +169,7 @@ } } }, - "revision": "20210910", + "revision": "20210916", "rootUrl": "https://servicecontrol.googleapis.com/", "schemas": { "Api": { diff --git a/googleapiclient/discovery_cache/documents/servicedirectory.v1.json b/googleapiclient/discovery_cache/documents/servicedirectory.v1.json index d413161a09a..ff0e52a1c59 100644 --- a/googleapiclient/discovery_cache/documents/servicedirectory.v1.json +++ b/googleapiclient/discovery_cache/documents/servicedirectory.v1.json @@ -883,7 +883,7 @@ } } }, - "revision": "20210907", + "revision": "20210914", "rootUrl": "https://servicedirectory.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json b/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json index 5089c8b2cbd..aace64f1a94 100644 --- a/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json @@ -883,7 +883,7 @@ } } }, - "revision": "20210907", + "revision": "20210914", "rootUrl": "https://servicedirectory.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json index beb997f452a..d320cc7c3aa 100644 --- a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json @@ -829,7 +829,7 @@ } } }, - "revision": "20210910", + "revision": "20210917", "rootUrl": "https://servicemanagement.googleapis.com/", "schemas": { "Advice": { diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json index fabf5701450..f0603b62b26 100644 --- a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json +++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json @@ -860,7 +860,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { "AddDnsRecordSetMetadata": { @@ -987,6 +987,10 @@ "format": "int32", "type": "integer" }, + "outsideAllocationPublicIpRange": { + "description": "Optional. Enable outside allocation using public IP addresses. Any public IP range may be specified. If this field is provided, we will not use customer reserved ranges for this primary IP range.", + "type": "string" + }, "privateIpv6GoogleAccess": { "description": "Optional. The private IPv6 google access type for the VMs in this subnet. 
For information about the access types that can be set using this field, see [subnetwork](https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks) in the Compute API documentation.", "type": "string" @@ -2822,6 +2826,10 @@ "format": "int32", "type": "integer" }, + "outsideAllocationPublicIpRange": { + "description": "Optional. Enable outside allocation using public IP addresses. Any public IP range may be specified. If this field is provided, we will not use customer reserved ranges for this secondary IP range.", + "type": "string" + }, "rangeName": { "description": "Required. A name for the secondary IP range. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json index ef2f28d0cbf..354e2612dea 100644 --- a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json +++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json @@ -307,7 +307,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { "AddDnsRecordSetMetadata": { diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1.json index c15b17b8a6a..6086f17d495 100644 --- a/googleapiclient/discovery_cache/documents/serviceusage.v1.json +++ b/googleapiclient/discovery_cache/documents/serviceusage.v1.json @@ -426,7 +426,7 @@ } } }, - "revision": "20210910", + "revision": "20210924", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AdminQuotaPolicy": { diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json index 2505b29988a..15a59627df8 100644 --- a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json @@ -959,7 +959,7 @@ } } }, - "revision": "20210910", + "revision": "20210924", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AdminQuotaPolicy": { diff --git a/googleapiclient/discovery_cache/documents/sheets.v4.json b/googleapiclient/discovery_cache/documents/sheets.v4.json index 5845f6be702..8f8a40724b8 100644 --- a/googleapiclient/discovery_cache/documents/sheets.v4.json +++ b/googleapiclient/discovery_cache/documents/sheets.v4.json @@ -870,7 +870,7 @@ } } }, - "revision": "20210906", + "revision": "20210916", "rootUrl": "https://sheets.googleapis.com/", "schemas": { "AddBandingRequest": { diff --git a/googleapiclient/discovery_cache/documents/storage.v1.json b/googleapiclient/discovery_cache/documents/storage.v1.json index a00c74fe250..1ad40d36422 100644 --- a/googleapiclient/discovery_cache/documents/storage.v1.json +++ b/googleapiclient/discovery_cache/documents/storage.v1.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"353939383733353534373734383337373436\"", + "etag": "\"31383038373534363135323237313631333333\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -3230,7 +3230,7 @@ } } }, - "revision": "20210918", + 
"revision": "20210922", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { diff --git a/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json b/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json index 984b55c978f..bd5f37f991e 100644 --- a/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json +++ b/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json @@ -375,7 +375,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://streetviewpublish.googleapis.com/", "schemas": { "BatchDeletePhotosRequest": { diff --git a/googleapiclient/discovery_cache/documents/sts.v1.json b/googleapiclient/discovery_cache/documents/sts.v1.json index bae7567a1d3..f6737de56fa 100644 --- a/googleapiclient/discovery_cache/documents/sts.v1.json +++ b/googleapiclient/discovery_cache/documents/sts.v1.json @@ -131,7 +131,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://sts.googleapis.com/", "schemas": { "GoogleIamV1Binding": { diff --git a/googleapiclient/discovery_cache/documents/sts.v1beta.json b/googleapiclient/discovery_cache/documents/sts.v1beta.json index 45a727aab87..f96db47a700 100644 --- a/googleapiclient/discovery_cache/documents/sts.v1beta.json +++ b/googleapiclient/discovery_cache/documents/sts.v1beta.json @@ -116,7 +116,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://sts.googleapis.com/", "schemas": { "GoogleIamV1Binding": { diff --git a/googleapiclient/discovery_cache/documents/tagmanager.v1.json b/googleapiclient/discovery_cache/documents/tagmanager.v1.json index 287f3198925..442557fd459 100644 --- a/googleapiclient/discovery_cache/documents/tagmanager.v1.json +++ b/googleapiclient/discovery_cache/documents/tagmanager.v1.json @@ -1932,7 +1932,7 @@ } } }, - "revision": "20210915", + "revision": "20210922", "rootUrl": "https://tagmanager.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/tagmanager.v2.json b/googleapiclient/discovery_cache/documents/tagmanager.v2.json index 32bbccee368..2db4ea7b5b4 100644 --- a/googleapiclient/discovery_cache/documents/tagmanager.v2.json +++ b/googleapiclient/discovery_cache/documents/tagmanager.v2.json @@ -3317,7 +3317,7 @@ } } }, - "revision": "20210915", + "revision": "20210922", "rootUrl": "https://tagmanager.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/tasks.v1.json b/googleapiclient/discovery_cache/documents/tasks.v1.json index 5cd3d4ee8fb..5175b54689e 100644 --- a/googleapiclient/discovery_cache/documents/tasks.v1.json +++ b/googleapiclient/discovery_cache/documents/tasks.v1.json @@ -566,7 +566,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://tasks.googleapis.com/", "schemas": { "Task": { diff --git a/googleapiclient/discovery_cache/documents/testing.v1.json b/googleapiclient/discovery_cache/documents/testing.v1.json index f0602880bdb..4db8472a7ff 100644 --- a/googleapiclient/discovery_cache/documents/testing.v1.json +++ b/googleapiclient/discovery_cache/documents/testing.v1.json @@ -282,7 +282,7 @@ } } }, - "revision": "20210908", + "revision": "20210918", "rootUrl": "https://testing.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1.json index bdf799f549f..3e65f99d456 100644 --- 
a/googleapiclient/discovery_cache/documents/texttospeech.v1.json +++ b/googleapiclient/discovery_cache/documents/texttospeech.v1.json @@ -153,7 +153,7 @@ } } }, - "revision": "20210827", + "revision": "20210917", "rootUrl": "https://texttospeech.googleapis.com/", "schemas": { "AudioConfig": { diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json index 730aacb21f3..d631a51c359 100644 --- a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json @@ -153,7 +153,7 @@ } } }, - "revision": "20210827", + "revision": "20210917", "rootUrl": "https://texttospeech.googleapis.com/", "schemas": { "AudioConfig": { diff --git a/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json b/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json index 467631e93ca..c74cec05618 100644 --- a/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json +++ b/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json @@ -1463,7 +1463,7 @@ } } }, - "revision": "20210918", + "revision": "20210927", "rootUrl": "https://toolresults.googleapis.com/", "schemas": { "ANR": { diff --git a/googleapiclient/discovery_cache/documents/vectortile.v1.json b/googleapiclient/discovery_cache/documents/vectortile.v1.json index b6d8ef1b400..7db2bd44917 100644 --- a/googleapiclient/discovery_cache/documents/vectortile.v1.json +++ b/googleapiclient/discovery_cache/documents/vectortile.v1.json @@ -343,7 +343,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://vectortile.googleapis.com/", "schemas": { "Area": { diff --git a/googleapiclient/discovery_cache/documents/webrisk.v1.json b/googleapiclient/discovery_cache/documents/webrisk.v1.json index 09bb0eac72b..022477284fe 100644 --- a/googleapiclient/discovery_cache/documents/webrisk.v1.json +++ b/googleapiclient/discovery_cache/documents/webrisk.v1.json @@ -446,7 +446,7 @@ } } }, - "revision": "20210910", + "revision": "20210918", "rootUrl": "https://webrisk.googleapis.com/", "schemas": { "GoogleCloudWebriskV1ComputeThreatListDiffResponse": { diff --git a/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json b/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json index d935d30d753..1fd8f52a1f7 100644 --- a/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json +++ b/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json @@ -526,7 +526,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://websecurityscanner.googleapis.com/", "schemas": { "Authentication": { diff --git a/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json b/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json index 5bda6340a12..404d4e00a4a 100644 --- a/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json @@ -526,7 +526,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://websecurityscanner.googleapis.com/", "schemas": { "Authentication": { diff --git a/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json b/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json index 3afc0a9778e..f06efb79a0c 100644 --- a/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json +++ 
b/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json @@ -526,7 +526,7 @@ } } }, - "revision": "20210918", + "revision": "20210924", "rootUrl": "https://websecurityscanner.googleapis.com/", "schemas": { "Authentication": { diff --git a/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json b/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json index ce9f5632449..bbe4229f99a 100644 --- a/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json +++ b/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json @@ -269,7 +269,7 @@ } } }, - "revision": "20210913", + "revision": "20210920", "rootUrl": "https://workflowexecutions.googleapis.com/", "schemas": { "CancelExecutionRequest": { diff --git a/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json b/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json index 0d4e2d3cdb2..dbcd40ca3df 100644 --- a/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json +++ b/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json @@ -269,7 +269,7 @@ } } }, - "revision": "20210913", + "revision": "20210920", "rootUrl": "https://workflowexecutions.googleapis.com/", "schemas": { "CancelExecutionRequest": { diff --git a/googleapiclient/discovery_cache/documents/workflows.v1.json b/googleapiclient/discovery_cache/documents/workflows.v1.json index 3ec08038613..a6149c066a1 100644 --- a/googleapiclient/discovery_cache/documents/workflows.v1.json +++ b/googleapiclient/discovery_cache/documents/workflows.v1.json @@ -444,7 +444,7 @@ } } }, - "revision": "20210908", + "revision": "20210915", "rootUrl": "https://workflows.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/workflows.v1beta.json b/googleapiclient/discovery_cache/documents/workflows.v1beta.json index dc440acb545..702f465d85d 100644 --- a/googleapiclient/discovery_cache/documents/workflows.v1beta.json +++ b/googleapiclient/discovery_cache/documents/workflows.v1beta.json @@ -444,7 +444,7 @@ } } }, - "revision": "20210908", + "revision": "20210915", "rootUrl": "https://workflows.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/youtube.v3.json b/googleapiclient/discovery_cache/documents/youtube.v3.json index 19601b4ffff..45b6153135e 100644 --- a/googleapiclient/discovery_cache/documents/youtube.v3.json +++ b/googleapiclient/discovery_cache/documents/youtube.v3.json @@ -3789,7 +3789,7 @@ } } }, - "revision": "20210919", + "revision": "20210925", "rootUrl": "https://youtube.googleapis.com/", "schemas": { "AbuseReport": { diff --git a/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json b/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json index c5f730252f6..b573a70619a 100644 --- a/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json +++ b/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json @@ -421,7 +421,7 @@ } } }, - "revision": "20210918", + "revision": "20210925", "rootUrl": "https://youtubeanalytics.googleapis.com/", "schemas": { "EmptyResponse": { diff --git a/googleapiclient/discovery_cache/documents/youtubereporting.v1.json b/googleapiclient/discovery_cache/documents/youtubereporting.v1.json index 8a292c79831..8676f039c52 100644 --- a/googleapiclient/discovery_cache/documents/youtubereporting.v1.json +++ b/googleapiclient/discovery_cache/documents/youtubereporting.v1.json @@ -411,7 +411,7 @@ } } }, - "revision": 
"20210918", + "revision": "20210925", "rootUrl": "https://youtubereporting.googleapis.com/", "schemas": { "Empty": {