From a933dad7e72d3093be480ce6af3965f41db1d193 Mon Sep 17 00:00:00 2001 From: yoshi-code-bot <70984784+yoshi-code-bot@users.noreply.github.com> Date: Tue, 6 Jul 2021 00:26:24 -0700 Subject: [PATCH] chore: Update discovery artifacts (#1430) ## Deleted keys were detected in the following pre-stable discovery artifacts: run v1alpha1 https://github.com/googleapis/google-api-python-client/commit/db18e29c7f616f212121960fe8efd6fb7cdf9b22 ## Discovery Artifact Change Summary: feat(artifactregistry): update the api https://github.com/googleapis/google-api-python-client/commit/bc9a38bf901a63525fb4c7b1e94fd4ce5fb441c3 feat(chat): update the api https://github.com/googleapis/google-api-python-client/commit/eea3c5c177aaded427fd3b5bab80812bf748ef79 feat(cloudasset): update the api https://github.com/googleapis/google-api-python-client/commit/2e31dd0b58d3c656df5aaa042994c637d0100f97 feat(cloudbuild): update the api https://github.com/googleapis/google-api-python-client/commit/3a3b420d53aabe1fdf6ddca483a3d164f72d6268 feat(composer): update the api https://github.com/googleapis/google-api-python-client/commit/78c0d8decbe640c522c45850c97002e7da12f4e0 feat(container): update the api https://github.com/googleapis/google-api-python-client/commit/a54737fe763fd288e54505faace58040cbf8920b feat(datafusion): update the api https://github.com/googleapis/google-api-python-client/commit/f6bf3c6b92fbf7072798b987998bf55ee9276389 feat(dataproc): update the api https://github.com/googleapis/google-api-python-client/commit/3fde9a3604e4811ce02f1062dcee9cef35b1ad51 feat(documentai): update the api https://github.com/googleapis/google-api-python-client/commit/79c556d389889fb0f48c8cc5ad5ab4a2caaab603 feat(groupssettings): update the api https://github.com/googleapis/google-api-python-client/commit/d537f96a20a699629fa85fbdeadb74ead3b32699 feat(logging): update the api https://github.com/googleapis/google-api-python-client/commit/d3548c505e4b1065365584493d15f21a19639626 feat(monitoring): update the api https://github.com/googleapis/google-api-python-client/commit/d24af68a9621fda9d7a576d3615178604a1482d2 feat(paymentsresellersubscription): update the api https://github.com/googleapis/google-api-python-client/commit/cff9039529278d95cee936826b5406867c638430 feat(redis): update the api https://github.com/googleapis/google-api-python-client/commit/46102d1726393f872420820e6200bb83cefd74b6 feat(run): update the api https://github.com/googleapis/google-api-python-client/commit/db18e29c7f616f212121960fe8efd6fb7cdf9b22 feat(slides): update the api https://github.com/googleapis/google-api-python-client/commit/68634cd565914e6003c851ec5f43fa2ff290afca feat(spanner): update the api https://github.com/googleapis/google-api-python-client/commit/289512124fc77a69957b912f06e9c3d315aa0526 feat(storagetransfer): update the api https://github.com/googleapis/google-api-python-client/commit/24895f156f10c03f2da686be95d8c70ea34008a3 --- docs/dyn/accessapproval_v1.folders.html | 6 +- docs/dyn/accessapproval_v1.organizations.html | 6 +- docs/dyn/accessapproval_v1.projects.html | 6 +- docs/dyn/adsense_v2.accounts.reports.html | 4 + ...s.locations.repositories.aptArtifacts.html | 136 ++ ...beta2.projects.locations.repositories.html | 10 + ...s.locations.repositories.yumArtifacts.html | 136 ++ docs/dyn/baremetalsolution_v1alpha1.html | 111 + .../baremetalsolution_v1alpha1.projects.html | 106 + ...lsolution_v1alpha1.projects.locations.html | 259 +++ ...v1alpha1.projects.locations.instances.html | 312 +++ ...tion_v1alpha1.projects.locations.luns.html | 188 ++ 
...n_v1alpha1.projects.locations.volumes.html | 253 +++ ....projects.locations.volumes.snapshots.html | 243 +++ ..._v1alpha1.projects.provisioningQuotas.html | 136 ++ ...ha1.projects.snapshotSchedulePolicies.html | 300 +++ ...talsolution_v1alpha1.projects.sshKeys.html | 186 ++ docs/dyn/civicinfo_v2.representatives.html | 2 + docs/dyn/cloudasset_v1.v1.html | 13 +- docs/dyn/cloudbuild_v1.projects.builds.html | 36 +- ...oudbuild_v1.projects.locations.builds.html | 36 +- .../dyn/cloudbuild_v1.projects.locations.html | 5 + ...dbuild_v1.projects.locations.triggers.html | 60 +- ...ild_v1.projects.locations.workerPools.html | 369 ++++ docs/dyn/cloudbuild_v1.projects.triggers.html | 60 +- ...ccounts.channelPartnerLinks.customers.html | 48 +- .../cloudchannel_v1.accounts.customers.html | 48 +- docs/dyn/cloudidentity_v1.groups.html | 2 +- docs/dyn/cloudidentity_v1beta1.groups.html | 2 +- ...beta1.projects.locations.environments.html | 102 +- ...tainer_v1.projects.locations.clusters.html | 4 + .../container_v1.projects.zones.clusters.html | 4 + ...r_v1beta1.projects.locations.clusters.html | 4 + ...ainer_v1beta1.projects.zones.clusters.html | 4 + .../dataflow_v1b3.projects.jobs.messages.html | 2 +- ...v1b3.projects.locations.jobs.messages.html | 2 +- .../dyn/datafusion_v1.projects.locations.html | 2 +- ...usion_v1.projects.locations.instances.html | 20 +- ...datafusion_v1beta1.projects.locations.html | 2 +- ...jects.locations.instances.dnsPeerings.html | 198 ++ ..._v1beta1.projects.locations.instances.html | 17 + ...ojects.locations.instances.namespaces.html | 93 + ...dataproc_v1.projects.regions.clusters.html | 58 +- docs/dyn/datastore_v1.projects.html | 286 +-- ...ogflow_v2.projects.agent.environments.html | 12 +- ...projects.locations.agent.environments.html | 12 +- ...w_v2beta1.projects.agent.environments.html | 12 +- ...projects.locations.agent.environments.html | 12 +- docs/dyn/drive_v2.drives.html | 2 +- docs/dyn/drive_v3.drives.html | 2 +- ...1beta.projects.apps.deviceCheckConfig.html | 8 +- ...firebaseappcheck_v1beta.projects.apps.html | 2 +- ..._v1beta.projects.apps.recaptchaConfig.html | 8 +- ..._v1beta1.projects.databases.documents.html | 510 +++-- docs/dyn/groupssettings_v1.groups.html | 5 + docs/dyn/index.md | 3 + docs/dyn/logging_v2.billingAccounts.html | 5 + .../logging_v2.billingAccounts.locations.html | 5 + ....billingAccounts.locations.operations.html | 176 ++ ...logging_v2.billingAccounts.operations.html | 124 ++ docs/dyn/logging_v2.entries.html | 46 + docs/dyn/logging_v2.folders.locations.html | 5 + ...gging_v2.folders.locations.operations.html | 214 ++ docs/dyn/logging_v2.locations.html | 5 + docs/dyn/logging_v2.locations.operations.html | 214 ++ .../logging_v2.organizations.locations.html | 5 + ...v2.organizations.locations.operations.html | 214 ++ docs/dyn/logging_v2.projects.locations.html | 5 + ...ging_v2.projects.locations.operations.html | 214 ++ .../monitoring_v1.projects.dashboards.html | 72 + .../monitoring_v3.projects.alertPolicies.html | 66 + docs/dyn/monitoring_v3.services.html | 18 + ...ubscription_v1.partners.subscriptions.html | 44 +- ...redis_v1.projects.locations.instances.html | 8 +- ..._v1beta1.projects.locations.instances.html | 8 +- ...rojects.locations.catalogs.placements.html | 6 +- ...rojects.locations.catalogs.userEvents.html | 12 +- ...rojects.locations.catalogs.placements.html | 6 +- ...rojects.locations.catalogs.userEvents.html | 12 +- ...rojects.locations.catalogs.placements.html | 6 +- ...rojects.locations.catalogs.userEvents.html | 12 +- 
docs/dyn/run_v1alpha1.namespaces.jobs.html | 1576 ++++++-------- docs/dyn/slides_v1.presentations.html | 12 + docs/dyn/slides_v1.presentations.pages.html | 1 + ...anner_v1.projects.instances.databases.html | 1 + ...projects.instances.databases.sessions.html | 98 +- docs/dyn/spanner_v1.scans.html | 1 + ...ragetransfer_v1.googleServiceAccounts.html | 1 + docs/dyn/storagetransfer_v1.transferJobs.html | 18 +- docs/dyn/sts_v1.v1.html | 4 +- docs/dyn/sts_v1beta.v1beta.html | 4 +- .../acceleratedmobilepageurl.v1.json | 2 +- .../documents/accessapproval.v1.json | 4 +- .../documents/adexchangebuyer.v1.2.json | 4 +- .../documents/adexchangebuyer.v1.3.json | 4 +- .../documents/adexchangebuyer.v1.4.json | 4 +- .../documents/adexchangebuyer2.v2beta1.json | 2 +- .../documents/admin.datatransfer_v1.json | 2 +- .../documents/admin.directory_v1.json | 2 +- .../documents/admin.reports_v1.json | 2 +- .../discovery_cache/documents/admob.v1.json | 2 +- .../documents/admob.v1beta.json | 2 +- .../discovery_cache/documents/adsense.v2.json | 10 +- .../documents/alertcenter.v1beta1.json | 2 +- .../documents/analyticsadmin.v1alpha.json | 2 +- .../documents/analyticsdata.v1beta.json | 2 +- .../androiddeviceprovisioning.v1.json | 2 +- .../documents/androidenterprise.v1.json | 2 +- .../documents/androidmanagement.v1.json | 2 +- .../documents/androidpublisher.v3.json | 2 +- .../documents/apigateway.v1.json | 2 +- .../documents/apigateway.v1beta.json | 2 +- .../discovery_cache/documents/apikeys.v2.json | 2 +- .../documents/appengine.v1.json | 2 +- .../documents/appengine.v1alpha.json | 2 +- .../documents/appengine.v1beta.json | 2 +- .../documents/area120tables.v1alpha1.json | 6 +- .../documents/artifactregistry.v1.json | 245 ++- .../documents/artifactregistry.v1beta1.json | 245 ++- .../documents/artifactregistry.v1beta2.json | 331 ++- .../documents/baremetalsolution.v1.json | 4 +- .../documents/baremetalsolution.v1alpha1.json | 1821 +++++++++++++++++ .../documents/bigqueryconnection.v1beta1.json | 2 +- .../documents/bigquerydatatransfer.v1.json | 2 +- .../documents/bigtableadmin.v1.json | 2 +- .../documents/bigtableadmin.v2.json | 2 +- .../documents/billingbudgets.v1.json | 2 +- .../documents/billingbudgets.v1beta1.json | 2 +- .../documents/binaryauthorization.v1.json | 2 +- .../binaryauthorization.v1beta1.json | 2 +- .../discovery_cache/documents/blogger.v2.json | 2 +- .../discovery_cache/documents/blogger.v3.json | 2 +- .../discovery_cache/documents/books.v1.json | 2 +- .../documents/calendar.v3.json | 2 +- .../discovery_cache/documents/chat.v1.json | 3 +- .../documents/chromemanagement.v1.json | 2 +- .../documents/chromepolicy.v1.json | 2 +- .../documents/chromeuxreport.v1.json | 2 +- .../documents/civicinfo.v2.json | 18 +- .../documents/classroom.v1.json | 2 +- .../documents/cloudasset.v1.json | 34 +- .../documents/cloudasset.v1beta1.json | 2 +- .../documents/cloudasset.v1p1beta1.json | 2 +- .../documents/cloudasset.v1p4beta1.json | 2 +- .../documents/cloudasset.v1p5beta1.json | 2 +- .../documents/cloudasset.v1p7beta1.json | 2 +- .../documents/cloudbuild.v1.json | 420 +++- .../documents/cloudbuild.v1alpha1.json | 86 +- .../documents/cloudbuild.v1alpha2.json | 86 +- .../documents/cloudbuild.v1beta1.json | 86 +- .../documents/cloudchannel.v1.json | 10 +- .../clouderrorreporting.v1beta1.json | 2 +- .../documents/cloudfunctions.v1.json | 2 +- .../documents/cloudidentity.v1.json | 4 +- .../documents/cloudidentity.v1beta1.json | 4 +- .../documents/cloudiot.v1.json | 2 +- .../documents/cloudkms.v1.json | 2 +- 
.../documents/cloudprofiler.v2.json | 2 +- .../documents/cloudresourcemanager.v1.json | 2 +- .../cloudresourcemanager.v1beta1.json | 2 +- .../documents/cloudresourcemanager.v2.json | 2 +- .../cloudresourcemanager.v2beta1.json | 2 +- .../documents/cloudresourcemanager.v3.json | 2 +- .../documents/cloudsearch.v1.json | 2 +- .../documents/cloudshell.v1.json | 2 +- .../documents/cloudtrace.v1.json | 2 +- .../documents/cloudtrace.v2.json | 2 +- .../documents/cloudtrace.v2beta1.json | 2 +- .../documents/composer.v1.json | 13 +- .../documents/composer.v1beta1.json | 146 +- .../documents/container.v1.json | 6 +- .../documents/container.v1beta1.json | 6 +- .../documents/containeranalysis.v1alpha1.json | 2 +- .../documents/containeranalysis.v1beta1.json | 2 +- .../documents/customsearch.v1.json | 2 +- .../documents/datacatalog.v1.json | 2 +- .../documents/datacatalog.v1beta1.json | 2 +- .../documents/dataflow.v1b3.json | 4 +- .../documents/datafusion.v1.json | 29 +- .../documents/datafusion.v1beta1.json | 293 ++- .../documents/datalabeling.v1beta1.json | 2 +- .../documents/datamigration.v1.json | 2 +- .../documents/datamigration.v1beta1.json | 2 +- .../documents/dataproc.v1.json | 68 +- .../documents/dataproc.v1beta2.json | 2 +- .../documents/dialogflow.v2.json | 4 +- .../documents/dialogflow.v2beta1.json | 4 +- .../documents/dialogflow.v3.json | 2 +- .../documents/dialogflow.v3beta1.json | 2 +- .../documents/digitalassetlinks.v1.json | 2 +- .../discovery_cache/documents/dlp.v2.json | 2 +- .../discovery_cache/documents/dns.v1.json | 2 +- .../documents/dns.v1beta2.json | 2 +- .../documents/documentai.v1.json | 19 +- .../documents/documentai.v1beta2.json | 19 +- .../documents/documentai.v1beta3.json | 19 +- .../documents/domains.v1alpha2.json | 2 +- .../documents/domains.v1beta1.json | 2 +- .../documents/domainsrdap.v1.json | 2 +- .../documents/doubleclicksearch.v2.json | 2 +- .../discovery_cache/documents/drive.v2.json | 6 +- .../discovery_cache/documents/drive.v3.json | 6 +- .../documents/driveactivity.v2.json | 2 +- .../documents/essentialcontacts.v1.json | 2 +- .../documents/factchecktools.v1alpha1.json | 2 +- .../discovery_cache/documents/fcm.v1.json | 2 +- .../discovery_cache/documents/file.v1.json | 2 +- .../documents/file.v1beta1.json | 2 +- .../documents/firebase.v1beta1.json | 2 +- .../documents/firebaseappcheck.v1beta.json | 8 +- .../documents/firebasedatabase.v1beta.json | 2 +- .../documents/firebasedynamiclinks.v1.json | 2 +- .../documents/firebaseml.v1.json | 2 +- .../documents/firebaseml.v1beta2.json | 2 +- .../discovery_cache/documents/fitness.v1.json | 2 +- .../discovery_cache/documents/games.v1.json | 2 +- .../gamesConfiguration.v1configuration.json | 2 +- .../gamesManagement.v1management.json | 2 +- .../documents/gameservices.v1.json | 2 +- .../documents/gameservices.v1beta.json | 2 +- .../discovery_cache/documents/gkehub.v1.json | 2 +- .../documents/gkehub.v1alpha.json | 2 +- .../documents/gkehub.v1alpha2.json | 2 +- .../documents/gkehub.v1beta.json | 2 +- .../documents/gkehub.v1beta1.json | 2 +- .../discovery_cache/documents/gmail.v1.json | 2 +- .../documents/gmailpostmastertools.v1.json | 2 +- .../gmailpostmastertools.v1beta1.json | 2 +- .../documents/groupsmigration.v1.json | 2 +- .../documents/groupssettings.v1.json | 6 +- .../documents/iamcredentials.v1.json | 2 +- .../documents/ideahub.v1alpha.json | 2 +- .../documents/indexing.v3.json | 2 +- .../documents/libraryagent.v1.json | 2 +- .../documents/licensing.v1.json | 2 +- .../documents/lifesciences.v2beta.json | 2 +- 
.../documents/localservices.v1.json | 2 +- .../discovery_cache/documents/logging.v2.json | 793 ++++++- .../documents/memcache.v1.json | 2 +- .../documents/memcache.v1beta2.json | 2 +- .../documents/metastore.v1alpha.json | 2 +- .../documents/metastore.v1beta.json | 2 +- .../documents/monitoring.v1.json | 17 +- .../documents/monitoring.v3.json | 58 +- .../mybusinessaccountmanagement.v1.json | 2 +- .../documents/mybusinesslodging.v1.json | 2 +- .../documents/mybusinessplaceactions.v1.json | 2 +- .../networkconnectivity.v1alpha1.json | 4 +- .../documents/networkmanagement.v1.json | 2 +- .../documents/networkmanagement.v1beta1.json | 2 +- .../documents/ondemandscanning.v1.json | 2 +- .../documents/ondemandscanning.v1beta1.json | 2 +- .../documents/orgpolicy.v2.json | 2 +- .../documents/osconfig.v1.json | 2 +- .../documents/osconfig.v1alpha.json | 2 +- .../documents/osconfig.v1beta.json | 2 +- .../discovery_cache/documents/oslogin.v1.json | 2 +- .../documents/oslogin.v1alpha.json | 2 +- .../documents/oslogin.v1beta.json | 2 +- .../documents/pagespeedonline.v5.json | 2 +- .../paymentsresellersubscription.v1.json | 23 +- .../discovery_cache/documents/people.v1.json | 2 +- .../documents/playablelocations.v3.json | 2 +- .../documents/playcustomapp.v1.json | 2 +- .../documents/policysimulator.v1.json | 2 +- .../documents/policysimulator.v1beta1.json | 2 +- .../documents/privateca.v1.json | 2 +- .../documents/privateca.v1beta1.json | 2 +- .../documents/prod_tt_sasportal.v1alpha1.json | 2 +- .../discovery_cache/documents/pubsub.v1.json | 2 +- .../documents/pubsub.v1beta1a.json | 2 +- .../documents/pubsub.v1beta2.json | 2 +- .../documents/pubsublite.v1.json | 2 +- .../documents/realtimebidding.v1.json | 2 +- .../documents/realtimebidding.v1alpha.json | 2 +- .../documents/recaptchaenterprise.v1.json | 2 +- .../documents/recommender.v1.json | 2 +- .../documents/recommender.v1beta1.json | 2 +- .../discovery_cache/documents/redis.v1.json | 4 +- .../documents/redis.v1beta1.json | 25 +- .../documents/remotebuildexecution.v1.json | 2 +- .../remotebuildexecution.v1alpha.json | 2 +- .../documents/remotebuildexecution.v2.json | 2 +- .../documents/reseller.v1.json | 2 +- .../documents/resourcesettings.v1.json | 2 +- .../discovery_cache/documents/retail.v2.json | 8 +- .../documents/retail.v2alpha.json | 8 +- .../documents/retail.v2beta.json | 8 +- .../discovery_cache/documents/run.v1.json | 4 +- .../documents/run.v1alpha1.json | 420 +--- .../documents/runtimeconfig.v1.json | 2 +- .../documents/safebrowsing.v4.json | 2 +- .../discovery_cache/documents/script.v1.json | 2 +- .../documents/searchconsole.v1.json | 2 +- .../serviceconsumermanagement.v1.json | 2 +- .../serviceconsumermanagement.v1beta1.json | 2 +- .../documents/servicecontrol.v1.json | 2 +- .../documents/servicecontrol.v2.json | 2 +- .../documents/servicemanagement.v1.json | 2 +- .../documents/servicenetworking.v1.json | 2 +- .../documents/servicenetworking.v1beta.json | 2 +- .../documents/serviceusage.v1.json | 2 +- .../documents/serviceusage.v1beta1.json | 2 +- .../discovery_cache/documents/sheets.v4.json | 2 +- .../discovery_cache/documents/slides.v1.json | 6 +- .../discovery_cache/documents/spanner.v1.json | 9 +- .../discovery_cache/documents/speech.v1.json | 2 +- .../documents/speech.v1p1beta1.json | 2 +- .../documents/speech.v2beta1.json | 2 +- .../discovery_cache/documents/storage.v1.json | 4 +- .../documents/storagetransfer.v1.json | 12 +- .../documents/streetviewpublish.v1.json | 2 +- .../discovery_cache/documents/sts.v1.json | 6 +- 
.../discovery_cache/documents/sts.v1beta.json | 6 +- .../documents/tagmanager.v1.json | 2 +- .../documents/tagmanager.v2.json | 2 +- .../discovery_cache/documents/tasks.v1.json | 2 +- .../discovery_cache/documents/testing.v1.json | 2 +- .../documents/toolresults.v1beta3.json | 2 +- .../documents/trafficdirector.v2.json | 2 +- .../documents/transcoder.v1beta1.json | 2 +- .../documents/vectortile.v1.json | 2 +- .../discovery_cache/documents/vision.v1.json | 2 +- .../documents/vision.v1p1beta1.json | 2 +- .../documents/vision.v1p2beta1.json | 2 +- .../documents/webfonts.v1.json | 2 +- .../documents/workflows.v1beta.json | 2 +- .../discovery_cache/documents/youtube.v3.json | 2 +- .../documents/youtubeAnalytics.v2.json | 2 +- .../documents/youtubereporting.v1.json | 2 +- 330 files changed, 11174 insertions(+), 2375 deletions(-) create mode 100644 docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.aptArtifacts.html create mode 100644 docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.yumArtifacts.html create mode 100644 docs/dyn/baremetalsolution_v1alpha1.html create mode 100644 docs/dyn/baremetalsolution_v1alpha1.projects.html create mode 100644 docs/dyn/baremetalsolution_v1alpha1.projects.locations.html create mode 100644 docs/dyn/baremetalsolution_v1alpha1.projects.locations.instances.html create mode 100644 docs/dyn/baremetalsolution_v1alpha1.projects.locations.luns.html create mode 100644 docs/dyn/baremetalsolution_v1alpha1.projects.locations.volumes.html create mode 100644 docs/dyn/baremetalsolution_v1alpha1.projects.locations.volumes.snapshots.html create mode 100644 docs/dyn/baremetalsolution_v1alpha1.projects.provisioningQuotas.html create mode 100644 docs/dyn/baremetalsolution_v1alpha1.projects.snapshotSchedulePolicies.html create mode 100644 docs/dyn/baremetalsolution_v1alpha1.projects.sshKeys.html create mode 100644 docs/dyn/cloudbuild_v1.projects.locations.workerPools.html create mode 100644 docs/dyn/datafusion_v1beta1.projects.locations.instances.dnsPeerings.html create mode 100644 docs/dyn/logging_v2.billingAccounts.locations.operations.html create mode 100644 docs/dyn/logging_v2.billingAccounts.operations.html create mode 100644 docs/dyn/logging_v2.folders.locations.operations.html create mode 100644 docs/dyn/logging_v2.locations.operations.html create mode 100644 docs/dyn/logging_v2.organizations.locations.operations.html create mode 100644 docs/dyn/logging_v2.projects.locations.operations.html create mode 100644 googleapiclient/discovery_cache/documents/baremetalsolution.v1alpha1.json diff --git a/docs/dyn/accessapproval_v1.folders.html b/docs/dyn/accessapproval_v1.folders.html index bca1e5f5bb2..1c58dd61212 100644 --- a/docs/dyn/accessapproval_v1.folders.html +++ b/docs/dyn/accessapproval_v1.folders.html @@ -133,7 +133,7 @@
+ close()
Close httplib2 connections.
+
+ import_(parent, body=None, x__xgafv=None)
Imports Apt artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored.
+close()
+ Close httplib2 connections.+
import_(parent, body=None, x__xgafv=None)
+ Imports Apt artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored. + +Args: + parent: string, The name of the parent resource where the artifacts will be imported. (required) + body: object, The request body. + The object takes the form of: + +{ # The request to import new apt artifacts. + "gcsSource": { # Google Cloud Storage location where the artifacts currently reside. # Google Cloud Storage location where input content is located. + "uris": [ # Cloud Storage paths URI (e.g., gs://my_bucket//my_object). + "A String", + ], + "useWildcards": True or False, # Supports URI wildcards for matching multiple objects from a single URI. + }, +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. + "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+
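For orientation, a minimal sketch of invoking this method through the dynamic Python client; the project, repository, and bucket names are hypothetical, and Application Default Credentials are assumed to be configured:

```python
from googleapiclient.discovery import build

# Build the Artifact Registry client from its discovery document.
service = build("artifactregistry", "v1beta2")

# Hypothetical parent repository and Cloud Storage source.
parent = "projects/my-project/locations/us-central1/repositories/my-apt-repo"
body = {
    "gcsSource": {
        "uris": ["gs://my-bucket/debs/*.deb"],
        "useWildcards": True,
    }
}

# Starts a long-running operation; the import itself completes asynchronously.
operation = (
    service.projects()
    .locations()
    .repositories()
    .aptArtifacts()
    .import_(parent=parent, body=body)
    .execute()
)
print(operation.get("name"))
```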
+ aptArtifacts()
+
Returns the aptArtifacts Resource.
+ @@ -84,6 +89,11 @@Returns the packages Resource.
+
+ yumArtifacts()
+
Returns the yumArtifacts Resource.
+Close httplib2 connections.
diff --git a/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.yumArtifacts.html b/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.yumArtifacts.html new file mode 100644 index 00000000000..f1cf7b0f94a --- /dev/null +++ b/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.yumArtifacts.html @@ -0,0 +1,136 @@ + + + +
+ close()
Close httplib2 connections.
+
+ import_(parent, body=None, x__xgafv=None)
Imports Yum (RPM) artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored.
+close()
+ Close httplib2 connections.+
import_(parent, body=None, x__xgafv=None)
+ Imports Yum (RPM) artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored. + +Args: + parent: string, The name of the parent resource where the artifacts will be imported. (required) + body: object, The request body. + The object takes the form of: + +{ # The request to import new yum artifacts. + "gcsSource": { # Google Cloud Storage location where the artifacts currently reside. # Google Cloud Storage location where input content is located. + "uris": [ # Cloud Storage paths URI (e.g., gs://my_bucket//my_object). + "A String", + ], + "useWildcards": True or False, # Supports URI wildcards for matching multiple objects from a single URI. + }, +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. + "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+
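The Yum variant follows the same shape; a sketch that also checks the documented Operation fields on the immediate response (all names hypothetical):

```python
from googleapiclient.discovery import build

service = build("artifactregistry", "v1beta2")

# Hypothetical repository; RPMs are imported from Cloud Storage.
operation = (
    service.projects()
    .locations()
    .repositories()
    .yumArtifacts()
    .import_(
        parent="projects/my-project/locations/us-central1/repositories/my-rpm-repo",
        body={
            "gcsSource": {
                "uris": ["gs://my-bucket/rpms/*.rpm"],
                "useWildcards": True,
            }
        },
    )
    .execute()
)

# `done` is normally False right after submission, so callers usually poll
# the operation; the fields below are the ones documented in the response.
if operation.get("done"):
    if "error" in operation:
        raise RuntimeError(operation["error"].get("message"))
    print(operation.get("response"))
else:
    print("import still running:", operation.get("name"))
```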
+ projects()
+
Returns the projects Resource.
+ +
+ close()
Close httplib2 connections.
+ +Create a BatchHttpRequest object based on the discovery document.
+close()
+ Close httplib2 connections.+
new_batch_http_request()
+ Create a BatchHttpRequest object based on the discovery document. + + Args: + callback: callable, A callback to be called for each response, of the + form callback(id, response, exception). The first parameter is the + request id, and the second is the deserialized response object. The + third is an apiclient.errors.HttpError exception object if an HTTP + error occurred while processing the request, or None if no error + occurred. + + Returns: + A BatchHttpRequest object based on the discovery document. ++
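A minimal sketch of the batching pattern this method enables; the callback signature follows the docstring above, and the instance names are hypothetical:

```python
from googleapiclient.discovery import build

service = build("baremetalsolution", "v1alpha1")

def callback(request_id, response, exception):
    # Invoked once per batched request, as described above.
    if exception is not None:
        print(f"request {request_id} failed: {exception}")
    else:
        print(f"request {request_id}:", response.get("name"))

batch = service.new_batch_http_request(callback=callback)

# Any request built from this service object can be added to the batch.
for name in (
    "projects/my-project/locations/us-central1/instances/server-1",
    "projects/my-project/locations/us-central1/instances/server-2",
):
    batch.add(service.projects().locations().instances().get(name=name))

batch.execute()
```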
+ locations()
+
Returns the locations Resource.
+ + +Returns the provisioningQuotas Resource.
+ +
+ snapshotSchedulePolicies()
+
Returns the snapshotSchedulePolicies Resource.
+ +
+ sshKeys()
+
Returns the sshKeys Resource.
+ +
+ close()
Close httplib2 connections.
+close()
+ Close httplib2 connections.+
+ instances()
+
Returns the instances Resource.
+ +
+ luns()
+
Returns the luns Resource.
+ +
+ volumes()
+
Returns the volumes Resource.
+ +
+ close()
Close httplib2 connections.
+
+ submitProvisioningConfig(project, location, body=None, x__xgafv=None)
Submit a provisioning configuration for a given project.
+close()
+ Close httplib2 connections.+
submitProvisioningConfig(project, location, body=None, x__xgafv=None)
+ Submit a provisiong configuration for a given project. + +Args: + project: string, Required. The target project of the provisioning request. (required) + location: string, Required. The target location of the provisioning request. (required) + body: object, The request body. + The object takes the form of: + +{ # Request for SubmitProvisioningConfig. + "provisioningConfig": { # An provisioning configuration. # Required. The ProvisioningConfig to submit. + "instances": [ # Instances to be created. + { # Configuration parameters for a new instance. + "clientNetwork": { # A network. # Client network address. + "address": "A String", # IP address to be assigned to the server. + "networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. + }, + "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. + "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request. + "instanceType": "A String", # Instance type. + "location": "A String", # Location where to deploy the instance. + "osImage": "A String", # OS image to initialize the instance. + "privateNetwork": { # A network. # Private network address, if any. + "address": "A String", # IP address to be assigned to the server. + "networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. + }, + }, + ], + "networks": [ # Networks to be created. + { # Configuration parameters for a new network. + "bandwidth": "A String", # Interconnect bandwidth. Set only when type is CLIENT. + "cidr": "A String", # CIDR range of the network. + "id": "A String", # A transient unique identifier to identify a volume within an ProvisioningConfig request. + "location": "A String", # Location where to deploy the network. + "serviceCidr": "A String", # Service CIDR, if any. + "type": "A String", # The type of this network. + "vlanAttachments": [ # List of VLAN attachments. As of now there are always 2 attachments, but it is going to change in the future (multi vlan). + { # A GCP vlan attachment. + "id": "A String", # Identifier of the VLAN attachment. + "pairingKey": "A String", # Attachment pairing key. + }, + ], + }, + ], + "ticketId": "A String", # A reference to track the request. + "volumes": [ # Volumes to be created. + { # Configuration parameters for a new volume. + "id": "A String", # A transient unique identifier to identify a volume within an ProvisioningConfig request. + "location": "A String", # Location where to deploy the volume. + "lunRanges": [ # LUN ranges to be configured. Set only when protocol is PROTOCOL_FC. + { # A LUN range. + "quantity": 42, # Number of LUNs to create. + "sizeGb": 42, # The requested size of each LUN, in GB. + }, + ], + "machineIds": [ # Machine ids connected to this volume. Set only when protocol is PROTOCOL_FC. + "A String", + ], + "nfsExports": [ # NFS exports. Set only when protocol is PROTOCOL_NFS. + { # A NFS export entry. + "allowDev": True or False, # Allow dev. + "allowSuid": True or False, # Allow the setuid flag. + "cidr": "A String", # A CIDR range. + "machineId": "A String", # A single machine, identified by an ID. + "networkId": "A String", # Network to use to publish the export. + "noRootSquash": True or False, # Disable root squashing. + "permissions": "A String", # Export permissions. + }, + ], + "protocol": "A String", # Volume protocol. + "sizeGb": 42, # The requested size of this volume, in GB. 
This will be updated in a later iteration with a generic size field. + "snapshotsEnabled": True or False, # Whether snapshots should be enabled. + "type": "A String", # The type of this Volume. + }, + ], + }, +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # An provisioning configuration. + "instances": [ # Instances to be created. + { # Configuration parameters for a new instance. + "clientNetwork": { # A network. # Client network address. + "address": "A String", # IP address to be assigned to the server. + "networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. + }, + "hyperthreading": True or False, # Whether the instance should be provisioned with Hyperthreading enabled. + "id": "A String", # A transient unique identifier to idenfity an instance within an ProvisioningConfig request. + "instanceType": "A String", # Instance type. + "location": "A String", # Location where to deploy the instance. + "osImage": "A String", # OS image to initialize the instance. + "privateNetwork": { # A network. # Private network address, if any. + "address": "A String", # IP address to be assigned to the server. + "networkId": "A String", # Id of the network to use, within the same ProvisioningConfig request. + }, + }, + ], + "networks": [ # Networks to be created. + { # Configuration parameters for a new network. + "bandwidth": "A String", # Interconnect bandwidth. Set only when type is CLIENT. + "cidr": "A String", # CIDR range of the network. + "id": "A String", # A transient unique identifier to identify a volume within an ProvisioningConfig request. + "location": "A String", # Location where to deploy the network. + "serviceCidr": "A String", # Service CIDR, if any. + "type": "A String", # The type of this network. + "vlanAttachments": [ # List of VLAN attachments. As of now there are always 2 attachments, but it is going to change in the future (multi vlan). + { # A GCP vlan attachment. + "id": "A String", # Identifier of the VLAN attachment. + "pairingKey": "A String", # Attachment pairing key. + }, + ], + }, + ], + "ticketId": "A String", # A reference to track the request. + "volumes": [ # Volumes to be created. + { # Configuration parameters for a new volume. + "id": "A String", # A transient unique identifier to identify a volume within an ProvisioningConfig request. + "location": "A String", # Location where to deploy the volume. + "lunRanges": [ # LUN ranges to be configured. Set only when protocol is PROTOCOL_FC. + { # A LUN range. + "quantity": 42, # Number of LUNs to create. + "sizeGb": 42, # The requested size of each LUN, in GB. + }, + ], + "machineIds": [ # Machine ids connected to this volume. Set only when protocol is PROTOCOL_FC. + "A String", + ], + "nfsExports": [ # NFS exports. Set only when protocol is PROTOCOL_NFS. + { # A NFS export entry. + "allowDev": True or False, # Allow dev. + "allowSuid": True or False, # Allow the setuid flag. + "cidr": "A String", # A CIDR range. + "machineId": "A String", # A single machine, identified by an ID. + "networkId": "A String", # Network to use to publish the export. + "noRootSquash": True or False, # Disable root squashing. + "permissions": "A String", # Export permissions. + }, + ], + "protocol": "A String", # Volume protocol. + "sizeGb": 42, # The requested size of this volume, in GB. This will be updated in a later iteration with a generic size field. 
+ "snapshotsEnabled": True or False, # Whether snapshots should be enabled. + "type": "A String", # The type of this Volume. + }, + ], +}+
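A sketch of a small provisioning request; every identifier, and the exact format expected for the `project` and `location` parameters, is hypothetical here:

```python
from googleapiclient.discovery import build

service = build("baremetalsolution", "v1alpha1")

# Hypothetical single-instance request; see the body schema above for the
# full set of instance, network, and volume parameters.
body = {
    "provisioningConfig": {
        "ticketId": "TICKET-001",
        "instances": [
            {
                "id": "instance-1",
                "instanceType": "o2-standard-32",
                "location": "us-central1",
                "osImage": "RHEL8",
                "hyperthreading": True,
            }
        ],
    }
}

config = (
    service.projects()
    .locations()
    .submitProvisioningConfig(
        project="projects/my-project",
        location="locations/us-central1",
        body=body,
    )
    .execute()
)
print(config.get("ticketId"))
```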
+ close()
Close httplib2 connections.
+
+ disableInteractiveSerialConsole(instance, body=None, x__xgafv=None)
Disable the interactive serial console feature on a specific machine.
+
+ enableInteractiveSerialConsole(instance, body=None, x__xgafv=None)
Enable the interactive serial console feature on a specific machine.
+ +Get details for a specific named Instance.
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
List Instances (physical servers).
+
+ list_next(previous_request, previous_response)
Retrieves the next page of results.
+
+ readSerialPortOutput(instance, startByte=None, x__xgafv=None)
Read the most recent serial port output from a machine.
+
+ resetInstance(instance, body=None, x__xgafv=None)
Perform an ungraceful, hard reset on a machine (equivalent to physically turning power off and then back on).
+close()
+ Close httplib2 connections.+
disableInteractiveSerialConsole(instance, body=None, x__xgafv=None)
+ Disable the interactive serial console feature on a specific machine. + +Args: + instance: string, Required. Name of the instance to disable the interactive serial console feature on. (required) + body: object, The request body. + The object takes the form of: + +{ # Request for DisableInteractiveSerialConsole. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for DisableInteractiveSerialConsole. +}+
enableInteractiveSerialConsole(instance, body=None, x__xgafv=None)
+ Enable the interactive serial console feature on a specific machine. + +Args: + instance: string, Required. Name of the instance to enable the interactive serial console feature on. (required) + body: object, The request body. + The object takes the form of: + +{ # Request for EnableInteractiveSerialConsole. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for EnableInteractiveSerialConsole. +}+
get(name, x__xgafv=None)
+ Get details for a specific named Instance. + +Args: + name: string, Required. The name of the Instance to retrieve. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # An Instance. + "hyperthreadingEnabled": True or False, # Is hyperthreading enabled for this instance? + "luns": [ # The Luns attached to this instance + { # A storage Lun. + "isBoot": True or False, # Whether this Lun is a boot Lun. + "multiprotocolType": "A String", # The multiprotocol type of this Lun. + "name": "A String", # Output only. The name of this Lun. + "remoteVolume": { # Volume registered in the project. # The storage volume that this Lun is attached to. + "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy. + "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink. + "name": "A String", # Output only. The name of this Volume. + "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots. + "requestedSizeGb": "A String", # The requested size of this Volume, in GB. + "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots. + "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space. + "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume. + "state": "A String", # The state of this Volume. + "type": "A String", # The type of this Volume. + }, + "shareable": True or False, # Whether this Lun is allowed to be shared between multiple physical servers. + "sizeGb": "A String", # The size of this Lun, in gigabytes. + "state": "A String", # The state of this Lun. + }, + ], + "name": "A String", # Output only. The name of this Instance. + "scheduledPowerResetTime": "A String", # The scheduled power reset time. + "sshEnabled": True or False, # Is SSH enabled for this instance? + "state": "A String", # The state of this Instance. +}+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ List Instances (physical servers). + +Args: + parent: string, Required. The location to list Instances in. (required) + pageSize: integer, The maximum number of items to return. + pageToken: string, The next_page_token value returned from a previous List request, if any. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for ListInstances. + "instances": [ # The Instances in this project. + { # An Instance. + "hyperthreadingEnabled": True or False, # Is hyperthreading enabled for this instance? + "luns": [ # The Luns attached to this instance + { # A storage Lun. + "isBoot": True or False, # Whether this Lun is a boot Lun. + "multiprotocolType": "A String", # The multiprotocol type of this Lun. + "name": "A String", # Output only. The name of this Lun. + "remoteVolume": { # Volume registered in the project. # The storage volume that this Lun is attached to. + "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy. + "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink. + "name": "A String", # Output only. The name of this Volume. + "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots. + "requestedSizeGb": "A String", # The requested size of this Volume, in GB. + "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots. + "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space. + "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume. + "state": "A String", # The state of this Volume. + "type": "A String", # The type of this Volume. + }, + "shareable": True or False, # Whether this Lun is allowed to be shared between multiple physical servers. + "sizeGb": "A String", # The size of this Lun, in gigabytes. + "state": "A String", # The state of this Lun. + }, + ], + "name": "A String", # Output only. The name of this Instance. + "scheduledPowerResetTime": "A String", # The scheduled power reset time. + "sshEnabled": True or False, # Is SSH enabled for this instance? + "state": "A String", # The state of this Instance. + }, + ], + "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list. +}+
list_next(previous_request, previous_response)
+ Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
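`list` and `list_next` follow the client's standard pagination pattern; a sketch with a hypothetical parent location:

```python
from googleapiclient.discovery import build

service = build("baremetalsolution", "v1alpha1")
instances = service.projects().locations().instances()

# Fetch pages until list_next returns None.
request = instances.list(
    parent="projects/my-project/locations/us-central1", pageSize=50
)
while request is not None:
    response = request.execute()
    for instance in response.get("instances", []):
        print(instance["name"], instance.get("state"))
    request = instances.list_next(
        previous_request=request, previous_response=response
    )
```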
readSerialPortOutput(instance, startByte=None, x__xgafv=None)
+ Read the most recent serial port output from a machine. + +Args: + instance: string, Required. Name of the instance to get serial port output of. (required) + startByte: string, Optional. The start byte of the serial port output to return. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for ReadSerialPortOutput. + "contents": "A String", # The serial port output. + "nextStartByte": "A String", # The byte index to use in a subsequent call to ReadSerialPortOutput to get more output. + "start": "A String", # The start byte index of the included contents. +}+
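A sketch of reading console output and resuming from the reported offset; the instance name is hypothetical:

```python
from googleapiclient.discovery import build

service = build("baremetalsolution", "v1alpha1")
instances = service.projects().locations().instances()

instance = "projects/my-project/locations/us-central1/instances/server-1"

# First call returns the most recent output plus the offsets documented above.
out = instances.readSerialPortOutput(instance=instance).execute()
print(out.get("contents", ""), end="")

# A later call can resume from nextStartByte to fetch only newer output.
more = instances.readSerialPortOutput(
    instance=instance, startByte=out.get("nextStartByte")
).execute()
print(more.get("contents", ""), end="")
```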
resetInstance(instance, body=None, x__xgafv=None)
+ Perform an ungraceful, hard reset on a machine (equivalent to physically turning power off and then back on). + +Args: + instance: string, Required. Name of the instance to reset. (required) + body: object, The request body. + The object takes the form of: + +{ # Request for ResetInstance. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for ResetInstance. +}+
+ close()
Close httplib2 connections.
+ +Get details for a specific named Lun.
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
List Luns.
+
+ list_next(previous_request, previous_response)
Retrieves the next page of results.
+close()
+ Close httplib2 connections.+
get(name, x__xgafv=None)
+ Get details for a specific named Lun. + +Args: + name: string, Required. The name of the Lun to retrieve. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A storage Lun. + "isBoot": True or False, # Whether this Lun is a boot Lun. + "multiprotocolType": "A String", # The multiprotocol type of this Lun. + "name": "A String", # Output only. The name of this Lun. + "remoteVolume": { # Volume registered in the project. # The storage volume that this Lun is attached to. + "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy. + "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink. + "name": "A String", # Output only. The name of this Volume. + "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots. + "requestedSizeGb": "A String", # The requested size of this Volume, in GB. + "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots. + "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space. + "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume. + "state": "A String", # The state of this Volume. + "type": "A String", # The type of this Volume. + }, + "shareable": True or False, # Whether this Lun is allowed to be shared between multiple physical servers. + "sizeGb": "A String", # The size of this Lun, in gigabytes. + "state": "A String", # The state of this Lun. +}+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ List Luns. + +Args: + parent: string, Required. The location to list Luns in. (required) + pageSize: integer, The maximum number of items to return. + pageToken: string, The next_page_token value returned from a previous List request, if any. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for ListLuns. + "luns": [ # The Luns in this project. + { # A storage Lun. + "isBoot": True or False, # Whether this Lun is a boot Lun. + "multiprotocolType": "A String", # The multiprotocol type of this Lun. + "name": "A String", # Output only. The name of this Lun. + "remoteVolume": { # Volume registered in the project. # The storage volume that this Lun is attached to. + "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy. + "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink. + "name": "A String", # Output only. The name of this Volume. + "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots. + "requestedSizeGb": "A String", # The requested size of this Volume, in GB. + "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots. + "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space. + "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume. + "state": "A String", # The state of this Volume. + "type": "A String", # The type of this Volume. + }, + "shareable": True or False, # Whether this Lun is allowed to be shared between multiple physical servers. + "sizeGb": "A String", # The size of this Lun, in gigabytes. + "state": "A String", # The state of this Lun. + }, + ], + "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list. +}+
list_next(previous_request, previous_response)
+ Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
+ snapshots()
+
Returns the snapshots Resource.
+ +
+ close()
Close httplib2 connections.
+ +Get details for a specific named Volume.
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
List the volumes for the specified project
+
+ list_next(previous_request, previous_response)
Retrieves the next page of results.
+
+ patch(name, body=None, updateMask=None, x__xgafv=None)
Update certain parameters on a Volume.
+
+ setVolumeSnapshotSchedulePolicy(volume, body=None, x__xgafv=None)
Sets the specified snapshot schedule policy on the specified volume.
+close()
+ Close httplib2 connections.+
get(name, x__xgafv=None)
+ Get details for a specific named Volume. + +Args: + name: string, Required. The name of the Volume to retrieve. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Volume registered in the project. + "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy. + "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink. + "name": "A String", # Output only. The name of this Volume. + "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots. + "requestedSizeGb": "A String", # The requested size of this Volume, in GB. + "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots. + "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space. + "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume. + "state": "A String", # The state of this Volume. + "type": "A String", # The type of this Volume. +}+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ List the volumes for the specified project + +Args: + parent: string, Required. The location to list Volumes in. (required) + pageSize: integer, The maximum number of items to return. + pageToken: string, The next_page_token value returned from a previous List request, if any. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for ListVolumes. + "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list. + "volumes": [ # The volumes registered in this project. + { # Volume registered in the project. + "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy. + "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink. + "name": "A String", # Output only. The name of this Volume. + "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots. + "requestedSizeGb": "A String", # The requested size of this Volume, in GB. + "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots. + "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space. + "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume. + "state": "A String", # The state of this Volume. + "type": "A String", # The type of this Volume. + }, + ], +}+
list_next(previous_request, previous_response)
+ Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
patch(name, body=None, updateMask=None, x__xgafv=None)
+ Update certain parameters on a Volume. + +Args: + name: string, Output only. The name of this Volume. (required) + body: object, The request body. + The object takes the form of: + +{ # Volume registered in the project. + "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy. + "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink. + "name": "A String", # Output only. The name of this Volume. + "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots. + "requestedSizeGb": "A String", # The requested size of this Volume, in GB. + "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots. + "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space. + "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume. + "state": "A String", # The state of this Volume. + "type": "A String", # The type of this Volume. +} + + updateMask: string, The list of fields to update. The only currently supported field is `snapshot_reserved_space_percent`. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Volume registered in the project. + "autoGrownSizeGb": "A String", # The size, in GB, that this Volume has expanded as a result of an auto grow policy. + "currentSizeGb": "A String", # The current size of this Volume, in GB, including space reserved for snapshots. This size may be different than the requested size if the Volume has been configured with auto grow or auto shrink. + "name": "A String", # Output only. The name of this Volume. + "remainingSpaceGb": "A String", # The space remaining in the Volume for new LUNs, in GB, excluding space reserved for snapshots. + "requestedSizeGb": "A String", # The requested size of this Volume, in GB. + "snapshotReservedSpacePercent": 42, # The percent of space on this Volume reserved for snapshots. + "snapshotReservedSpaceRemainingGb": "A String", # The amount, in GB, of space available in this Volume's reserved snapshot space. + "snapshotReservedSpaceUsedPercent": 42, # The percent of reserved snapshot space on this Volume that is actually used by snapshot copies. This may be higher than 100% if snapshot copies are occupying more space than has been reserved on the Volume. + "state": "A String", # The state of this Volume. + "type": "A String", # The type of this Volume. +}+
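A sketch of the one update the docstring currently permits, adjusting the snapshot reservation; the volume name and percentage are hypothetical:

```python
from googleapiclient.discovery import build

service = build("baremetalsolution", "v1alpha1")

# Per the docstring, only snapshot_reserved_space_percent can be patched today.
volume_name = "projects/my-project/locations/us-central1/volumes/vol-1"
updated = (
    service.projects()
    .locations()
    .volumes()
    .patch(
        name=volume_name,
        updateMask="snapshot_reserved_space_percent",
        body={"snapshotReservedSpacePercent": 30},
    )
    .execute()
)
print(updated.get("snapshotReservedSpacePercent"))
```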
setVolumeSnapshotSchedulePolicy(volume, body=None, x__xgafv=None)
+ Sets the specified snapshot schedule policy on the specified volume. + +Args: + volume: string, Required. Name of the volume to set snapshot schedule policy on. (required) + body: object, The request body. + The object takes the form of: + +{ # Request for SetVolumeSnapshotSchedulePolicy. + "snapshotSchedulePolicy": "A String", # Required. The name of the policy to set on the volume. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for SetVolumeSnapshotSchedulePolicy. +}+
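For orientation, a minimal sketch of calling the Volume methods above through the generated Python client. The project, location, and volume names are placeholders, and the sketch assumes the Bare Metal Solution v1alpha1 discovery document is available to `googleapiclient.discovery.build` with default credentials:

    from googleapiclient import discovery

    # Hypothetical project/location; replace with real values.
    service = discovery.build('baremetalsolution', 'v1alpha1')
    volumes = service.projects().locations().volumes()
    parent = 'projects/my-project/locations/us-central1'

    response = volumes.list(parent=parent, pageSize=50).execute()
    for volume in response.get('volumes', []):
        print(volume['name'], volume.get('currentSizeGb'))

    # Only snapshot_reserved_space_percent is currently updatable via patch.
    if response.get('volumes'):
        volumes.patch(
            name=response['volumes'][0]['name'],
            updateMask='snapshot_reserved_space_percent',
            body={'snapshotReservedSpacePercent': 20},
        ).execute()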
+ close()
Close httplib2 connections.
+
+ create(parent, body=None, x__xgafv=None)
Create snapshot of the specified Volume
+
+  delete(name, x__xgafv=None)
Delete specific named snapshot.
+
+  get(name, x__xgafv=None)
Get details for a specific named snapshot.
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
List the Snapshots for the specified Volume
+
+ list_next(previous_request, previous_response)
Retrieves the next page of results.
+
+ restore(name, body=None, x__xgafv=None)
Restore a VolumeSnapshot.
+close()
+ Close httplib2 connections.+
create(parent, body=None, x__xgafv=None)
+ Create snapshot of the specified Volume + +Args: + parent: string, Required. The Volume containing the VolumeSnapshots. (required) + body: object, The request body. + The object takes the form of: + +{ # VolumeSnapshot registered for given Volume + "description": "A String", # The description of this Snapshot. + "name": "A String", # Output only. The name of this Snapshot. + "sizeBytes": "A String", # The real size of this Snapshot, in bytes. + "state": "A String", # The state of this Snapshot. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # VolumeSnapshot registered for given Volume + "description": "A String", # The description of this Snapshot. + "name": "A String", # Output only. The name of this Snapshot. + "sizeBytes": "A String", # The real size of this Snapshot, in bytes. + "state": "A String", # The state of this Snapshot. +}+
delete(name, x__xgafv=None)
+ Delete specific named snapshot. + +Args: + name: string, Required. The name of the snapshot to delete. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`. +}+
get(name, x__xgafv=None)
+ Get details for a specific named snapshot. + +Args: + name: string, Required. The name of the snapshot to retrieve. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # VolumeSnapshot registered for given Volume + "description": "A String", # The description of this Snapshot. + "name": "A String", # Output only. The name of this Snapshot. + "sizeBytes": "A String", # The real size of this Snapshot, in bytes. + "state": "A String", # The state of this Snapshot. +}+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ List the Snapshots for the specified Volume + +Args: + parent: string, Required. The Volume containing the VolumeSnapshots. (required) + pageSize: integer, The maximum number of items to return. + pageToken: string, The next_page_token value returned from a previous List request, if any. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for ListVolumeSnapshots. + "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list. + "volumeSnapshots": [ # The VolumeSnapshots for the volume. + { # VolumeSnapshot registered for given Volume + "description": "A String", # The description of this Snapshot. + "name": "A String", # Output only. The name of this Snapshot. + "sizeBytes": "A String", # The real size of this Snapshot, in bytes. + "state": "A String", # The state of this Snapshot. + }, + ], +}+
list_next(previous_request, previous_response)
+ Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
restore(name, body=None, x__xgafv=None)
+ Restore a VolumeSnapshot. + +Args: + name: string, Required. Name of the VolumeSnapshot to restore. (required) + body: object, The request body. + The object takes the form of: + +{ # Request for RestoreVolumeSnapshot. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`. +}+
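As a sketch of the VolumeSnapshot lifecycle documented above (the volume name and description are placeholders, assuming the same v1alpha1 client as before):

    from googleapiclient import discovery

    service = discovery.build('baremetalsolution', 'v1alpha1')
    snapshots = service.projects().locations().volumes().snapshots()
    volume = 'projects/my-project/locations/us-central1/volumes/my-volume'  # placeholder

    # Take a snapshot of the volume.
    snapshot = snapshots.create(
        parent=volume,
        body={'description': 'pre-maintenance snapshot'},
    ).execute()

    # Later, roll the volume back to that snapshot.
    snapshots.restore(name=snapshot['name'], body={}).execute()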
+ close()
Close httplib2 connections.
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
List the budget details to provision resources on a given project.
+
+ list_next(previous_request, previous_response)
Retrieves the next page of results.
+close()
+ Close httplib2 connections.+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ List the budget details to provision resources on a given project. + +Args: + parent: string, Required. The parent project containing the provisioning quotas. (required) + pageSize: integer, The maximum number of items to return. + pageToken: string, The next_page_token value returned from a previous List request, if any. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for ListProvisioningQuotas. + "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list. + "provisioningQuotas": [ # The provisioning quotas registered in this project. + { # A provisioning quota for a given project. + "instanceQuota": { # A resource budget. # Instance quota. + "availableMachineCount": 42, # Number of machines than can be created for the given location and instance_type. + "instanceType": "A String", # Instance type. + "location": "A String", # Location where the quota applies. + }, + }, + ], +}+
list_next(previous_request, previous_response)
+ Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
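The list/list_next pair follows the client library's usual pagination idiom; a sketch with a placeholder project:

    from googleapiclient import discovery

    service = discovery.build('baremetalsolution', 'v1alpha1')
    quotas = service.projects().provisioningQuotas()

    request = quotas.list(parent='projects/my-project', pageSize=100)
    while request is not None:
        response = request.execute()
        for quota in response.get('provisioningQuotas', []):
            instance_quota = quota.get('instanceQuota', {})
            print(instance_quota.get('instanceType'),
                  instance_quota.get('location'),
                  instance_quota.get('availableMachineCount'))
        # list_next returns None once the last page has been consumed.
        request = quotas.list_next(request, response)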
+ close()
Close httplib2 connections.
+
+ create(parent, body=None, x__xgafv=None)
Create a SnapshotSchedulePolicy.
+
+  delete(name, x__xgafv=None)
Delete removes named snapshot schedule policy
+
+  get(name, x__xgafv=None)
Get details for a specific snapshot schedule policy
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
List the snapshot schedule policies for the specified project
+
+ list_next(previous_request, previous_response)
Retrieves the next page of results.
+
+ patch(name, body=None, updateMask=None, x__xgafv=None)
Update a SnapshotSchedulePolicy.
+close()
+ Close httplib2 connections.+
create(parent, body=None, x__xgafv=None)
+ Create a SnapshotSchedulePolicy. + +Args: + parent: string, Required. The parent project containing the SnapshotSchedulePolicy. (required) + body: object, The request body. + The object takes the form of: + +{ # A snapshot schedule policy. + "description": "A String", # The description of this SnapshotSchedulePolicy. + "name": "A String", # Output only. The name of this SnapshotSchedulePolicy. + "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified. + { # A snapshot schedule. + "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots. + "prefix": "A String", # A string to prefix names of snapshots created under this Schedule. + "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule. + }, + ], + "volumes": [ # The names of the Volumes this policy is associated with. + "A String", + ], +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A snapshot schedule policy. + "description": "A String", # The description of this SnapshotSchedulePolicy. + "name": "A String", # Output only. The name of this SnapshotSchedulePolicy. + "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified. + { # A snapshot schedule. + "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots. + "prefix": "A String", # A string to prefix names of snapshots created under this Schedule. + "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule. + }, + ], + "volumes": [ # The names of the Volumes this policy is associated with. + "A String", + ], +}+
delete(name, x__xgafv=None)
+ Delete removes named snapshot schedule policy + +Args: + name: string, Required. The name of the snapshot schedule policy to delete. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`. +}+
get(name, x__xgafv=None)
+ Get details for a specific snapshot schedule policy + +Args: + name: string, Required. The name of the policy to retrieve. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A snapshot schedule policy. + "description": "A String", # The description of this SnapshotSchedulePolicy. + "name": "A String", # Output only. The name of this SnapshotSchedulePolicy. + "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified. + { # A snapshot schedule. + "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots. + "prefix": "A String", # A string to prefix names of snapshots created under this Schedule. + "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule. + }, + ], + "volumes": [ # The names of the Volumes this policy is associated with. + "A String", + ], +}+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ List the snapshot schedule policies for the specified project + +Args: + parent: string, Required. The parent project containing the Snapshot Schedule Policies. (required) + pageSize: integer, The maximum number of items to return. + pageToken: string, The next_page_token value returned from a previous List request, if any. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for ListSnapshotSchedulePolicies. + "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list. + "snapshotSchedulePolicies": [ # The snapshot schedule policies registered in this project. + { # A snapshot schedule policy. + "description": "A String", # The description of this SnapshotSchedulePolicy. + "name": "A String", # Output only. The name of this SnapshotSchedulePolicy. + "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified. + { # A snapshot schedule. + "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots. + "prefix": "A String", # A string to prefix names of snapshots created under this Schedule. + "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule. + }, + ], + "volumes": [ # The names of the Volumes this policy is associated with. + "A String", + ], + }, + ], +}+
list_next(previous_request, previous_response)
+ Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
patch(name, body=None, updateMask=None, x__xgafv=None)
+ Update a SnapshotSchedulePolicy. + +Args: + name: string, Output only. The name of this SnapshotSchedulePolicy. (required) + body: object, The request body. + The object takes the form of: + +{ # A snapshot schedule policy. + "description": "A String", # The description of this SnapshotSchedulePolicy. + "name": "A String", # Output only. The name of this SnapshotSchedulePolicy. + "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified. + { # A snapshot schedule. + "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots. + "prefix": "A String", # A string to prefix names of snapshots created under this Schedule. + "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule. + }, + ], + "volumes": [ # The names of the Volumes this policy is associated with. + "A String", + ], +} + + updateMask: string, The list of fields to update. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A snapshot schedule policy. + "description": "A String", # The description of this SnapshotSchedulePolicy. + "name": "A String", # Output only. The name of this SnapshotSchedulePolicy. + "schedules": [ # The snapshot Schedules contained in this Policy. At most 5 Schedules may be specified. + { # A snapshot schedule. + "crontabSpec": "A String", # The crontab-like specification that this Schedule will use to take snapshots. + "prefix": "A String", # A string to prefix names of snapshots created under this Schedule. + "retentionCount": 42, # The maximum number of snapshots to retain under this Schedule. + }, + ], + "volumes": [ # The names of the Volumes this policy is associated with. + "A String", + ], +}+
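A sketch of creating one of these policies from the Python client; the crontab spec, prefix, and volume name are illustrative placeholders:

    from googleapiclient import discovery

    service = discovery.build('baremetalsolution', 'v1alpha1')
    policies = service.projects().snapshotSchedulePolicies()

    policy = policies.create(
        parent='projects/my-project',
        body={
            'description': 'Nightly snapshots, 7-day retention',
            'schedules': [{           # at most 5 schedules per policy
                'crontabSpec': '0 2 * * *',
                'prefix': 'nightly',
                'retentionCount': 7,
            }],
            'volumes': ['projects/my-project/locations/us-central1/volumes/my-volume'],
        },
    ).execute()
    print(policy['name'])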
+ close()
Close httplib2 connections.
+
+ create(parent, body=None, sshKeyId=None, x__xgafv=None)
Create a new SSH key registration in the specified project.
+
+  delete(name, x__xgafv=None)
Delete an SSH key registration in the specified project.
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
List the public SSH keys registered for the specified project.
+
+ list_next(previous_request, previous_response)
Retrieves the next page of results.
+close()
+ Close httplib2 connections.+
create(parent, body=None, sshKeyId=None, x__xgafv=None)
+ Create a new SSH key registration in the specified project. + +Args: + parent: string, Required. The parent project containing the SSH keys. (required) + body: object, The request body. + The object takes the form of: + +{ # A public SSH key registered in the project. Used primarily for the interactive serial console feature. + "name": "A String", # Output only. The name of this SSH key. + "publicKey": "A String", # The public SSH key. +} + + sshKeyId: string, Required. The ID to use for the key, which will become the final component of the key's resource name. This value should match the regex: [a-zA-Z0-9@.\-_]{1,64} + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A public SSH key registered in the project. Used primarily for the interactive serial console feature. + "name": "A String", # Output only. The name of this SSH key. + "publicKey": "A String", # The public SSH key. +}+
delete(name, x__xgafv=None)
+ Delete an SSH key registration in the specified project. + +Args: + name: string, Required. The name of the SSH key to delete. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`. +}+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ List the public SSH keys registered for the specified project. + +Args: + parent: string, Required. The parent project containing the SSH keys. (required) + pageSize: integer, The maximum number of items to return. + pageToken: string, The next_page_token value returned from a previous List request, if any. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response for ListSSHKeys. + "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list. + "sshKeys": [ # The SSH keys registered in this project. + { # A public SSH key registered in the project. Used primarily for the interactive serial console feature. + "name": "A String", # Output only. The name of this SSH key. + "publicKey": "A String", # The public SSH key. + }, + ], +}+
list_next(previous_request, previous_response)
+ Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
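Registering a key for the interactive serial console might look like the following sketch (the key material and IDs are placeholders; the sshKeyId must match the regex noted above):

    from googleapiclient import discovery

    service = discovery.build('baremetalsolution', 'v1alpha1')
    ssh_keys = service.projects().sshKeys()

    ssh_keys.create(
        parent='projects/my-project',
        sshKeyId='build-bot',  # becomes the final component of the resource name
        body={'publicKey': 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5... user@example.com'},
    ).execute()

    # Confirm registration.
    for key in ssh_keys.list(parent='projects/my-project').execute().get('sshKeys', []):
        print(key['name'])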
searchAllIamPolicies_next(previous_request, previous_response)
Retrieves the next page of results.
+searchAllResources(scope, assetTypes=None, orderBy=None, pageSize=None, pageToken=None, query=None, readMask=None, x__xgafv=None)
Searches all Cloud resources within the specified scope, such as a project, folder, or organization. The caller must be granted the `cloudasset.assets.searchAllResources` permission on the desired scope, otherwise the request will be rejected.
searchAllResources_next(previous_request, previous_response)
-searchAllResources(scope, assetTypes=None, orderBy=None, pageSize=None, pageToken=None, query=None, x__xgafv=None)
+ searchAllResources(scope, assetTypes=None, orderBy=None, pageSize=None, pageToken=None, query=None, readMask=None, x__xgafv=None)
Searches all Cloud resources within the specified scope, such as a project, folder, or organization. The caller must be granted the `cloudasset.assets.searchAllResources` permission on the desired scope, otherwise the request will be rejected.
Args:
@@ -1535,6 +1535,7 @@
diff --git a/docs/dyn/cloudbuild_v1.projects.builds.html b/docs/dyn/cloudbuild_v1.projects.builds.html
index 372f6262b84..b96a4131b2c 100644
--- a/docs/dyn/cloudbuild_v1.projects.builds.html
+++ b/docs/dyn/cloudbuild_v1.projects.builds.html
@@ -171,6 +171,9 @@ Method Details
pageSize: integer, Optional. The page size for search result pagination. Page size is capped at 500 even if a larger value is given. If set to zero, server will pick an appropriate default. Returned results may be fewer than requested. When this happens, there could be more results as long as `next_page_token` is returned. pageToken: string, Optional. If present, then retrieve the next batch of results from the preceding call to this method. `page_token` must be the value of `next_page_token` from the previous response. The values of all other method parameters, must be identical to those in the previous call. query: string, Optional. The query statement. See [how to construct a query](https://cloud.google.com/asset-inventory/docs/searching-resources#how_to_construct_a_query) for more information. If not specified or empty, it will search all the resources within the specified `scope`. Examples: * `name:Important` to find Cloud resources whose name contains "Important" as a word. * `name=Important` to find the Cloud resource whose name is exactly "Important". * `displayName:Impor*` to find Cloud resources whose display name contains "Impor" as a prefix of any word in the field. * `location:us-west*` to find Cloud resources whose location contains both "us" and "west" as prefixes. * `labels:prod` to find Cloud resources whose labels contain "prod" as a key or value. * `labels.env:prod` to find Cloud resources that have a label "env" and its value is "prod". * `labels.env:*` to find Cloud resources that have a label "env". * `kmsKey:key` to find Cloud resources encrypted with a customer-managed encryption key whose name contains the word "key". * `state:ACTIVE` to find Cloud resources whose state contains "ACTIVE" as a word. * `NOT state:ACTIVE` to find {{gcp_name}} resources whose state doesn't contain "ACTIVE" as a word. * `createTime<1609459200` to find Cloud resources that were created before "2021-01-01 00:00:00 UTC". 1609459200 is the epoch timestamp of "2021-01-01 00:00:00 UTC" in seconds. * `updateTime>1609459200` to find Cloud resources that were updated after "2021-01-01 00:00:00 UTC". 1609459200 is the epoch timestamp of "2021-01-01 00:00:00 UTC" in seconds. * `Important` to find Cloud resources that contain "Important" as a word in any of the searchable fields. * `Impor*` to find Cloud resources that contain "Impor" as a prefix of any word in any of the searchable fields. * `Important location:(us-west1 OR global)` to find Cloud resources that contain "Important" as a word in any of the searchable fields and are also located in the "us-west1" region or the "global" location. + readMask: string, Optional. A comma-separated list of fields specifying which fields to be returned in ResourceSearchResult. Only '*' or combination of top level fields can be specified. Field names of both snake_case and camelCase are supported. Examples: `"*"`, `"name,location"`, `"name,versionedResources"`. The read_mask paths must be valid field paths listed but not limited to (both snake_case and camelCase are supported): * name * asset_type or assetType * project * display_name or displayName * description * location * labels * network_tags or networkTags * kms_key or kmsKey * create_time or createTime * update_time or updateTime * state * additional_attributes or additionalAttributes * versioned_resources or versionedResources If read_mask is not specified, all fields except versionedResources will be returned. If only '*' is specified, all fields including versionedResources will be returned. 
Any invalid field path will trigger INVALID_ARGUMENT error. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format 2 - v2 error format
@@ -1572,6 +1573,14 @@ Method Details
"project": "A String", # The project that this resource belongs to, in the form of projects/{PROJECT_NUMBER}. This field is available when the resource belongs to a project. To search against `project`: * use a field query. Example: `project:12345` * use a free text query. Example: `12345` * specify the `scope` field as this project in your search request. "state": "A String", # The state of this resource. Different resources types have different state definitions that are mapped from various fields of different resource types. This field is available only when the resource's proto contains it. Example: If the resource is an instance provided by Compute Engine, its state will include PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. See `status` definition in [API Reference](https://cloud.google.com/compute/docs/reference/rest/v1/instances). If the resource is a project provided by Cloud Resource Manager, its state will include LIFECYCLE_STATE_UNSPECIFIED, ACTIVE, DELETE_REQUESTED and DELETE_IN_PROGRESS. See `lifecycleState` definition in [API Reference](https://cloud.google.com/resource-manager/reference/rest/v1/projects). To search against the `state`: * use a field query. Example: `state:RUNNING` * use a free text query. Example: `RUNNING` "updateTime": "A String", # The last update timestamp of this resource, at which the resource was last modified or deleted. The granularity is in seconds. Timestamp.nanos will always be 0. This field is available only when the resource's proto contains it. To search against `update_time`: * use a field query. - value in seconds since unix epoch. Example: `updateTime < 1609459200` - value in date string. Example: `updateTime < 2021-01-01` - value in date-time string (must be quoted). Example: `updateTime < "2021-01-01T00:00:00"` + "versionedResources": [ # Versioned resource representations of this resource. This is repeated because there could be multiple versions of resource representations during version migration. This `versioned_resources` field is not searchable. Some attributes of the resource representations are exposed in `additional_attributes` field, so as to allow users to search on them. + { # Resource representation as defined by the corresponding service providing the resource for a given API version. + "resource": { # JSON representation of the resource as defined by the corresponding service providing this resource. Example: If the resource is an instance provided by Compute Engine, this field will contain the JSON representation of the instance as defined by Compute Engine: `https://cloud.google.com/compute/docs/reference/rest/v1/instances`. You can find the resource definition for each supported resource type in this table: `https://cloud.google.com/asset-inventory/docs/supported-asset-types#searchable_asset_types` + "a_key": "", # Properties of the object. + }, + "version": "A String", # API version of the resource. Example: If the resource is an instance provided by Compute Engine v1 API as defined in `https://cloud.google.com/compute/docs/reference/rest/v1/instances`, version will be "v1". + }, + ], }, ], }
Returns the triggers Resource.
+
+ workerPools()
+
Returns the workerPools Resource.
+Close httplib2 connections.
diff --git a/docs/dyn/cloudbuild_v1.projects.locations.triggers.html b/docs/dyn/cloudbuild_v1.projects.locations.triggers.html
index 10ad8620a15..3fcbeba9228 100644
--- a/docs/dyn/cloudbuild_v1.projects.locations.triggers.html
+++ b/docs/dyn/cloudbuild_v1.projects.locations.triggers.html
@@ -117,6 +117,7 @@
+ close()
Close httplib2 connections.
+
+ create(parent, body=None, validateOnly=None, workerPoolId=None, x__xgafv=None)
Creates a `WorkerPool`.
+
+ delete(name, allowMissing=None, etag=None, validateOnly=None, x__xgafv=None)
Deletes a `WorkerPool`.
+
+  get(name, x__xgafv=None)
Returns details of a `WorkerPool`.
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
Lists `WorkerPool`s.
+
+ list_next(previous_request, previous_response)
Retrieves the next page of results.
+
+ patch(name, body=None, updateMask=None, validateOnly=None, x__xgafv=None)
Updates a `WorkerPool`.
+close()
+ Close httplib2 connections.+
create(parent, body=None, validateOnly=None, workerPoolId=None, x__xgafv=None)
+ Creates a `WorkerPool`. + +Args: + parent: string, Required. The parent resource where this worker pool will be created. Format: `projects/{project}/locations/{location}`. (required) + body: object, The request body. + The object takes the form of: + +{ # Configuration for a `WorkerPool`. Cloud Build owns and maintains a pool of workers for general use and have no access to a project's private network. By default, builds submitted to Cloud Build will use a worker from this pool. If your build needs access to resources on a private network, create and use a `WorkerPool` to run your builds. Private `WorkerPool`s give your builds access to any single VPC network that you administer, including any on-prem resources connected to that VPC network. For an overview of custom worker pools, see [Custom workers overview](https://cloud.google.com/cloud-build/docs/custom-workers/custom-workers-overview). + "annotations": { # User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + "a_key": "A String", + }, + "createTime": "A String", # Output only. Time at which the request to create the `WorkerPool` was received. + "deleteTime": "A String", # Output only. Time at which the request to delete the `WorkerPool` was received. + "displayName": "A String", # A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters. + "etag": "A String", # Output only. Checksum computed by the server. May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding. + "name": "A String", # Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed. + "privatePoolV1Config": { # Configuration for a V1 `PrivatePool`. # Private Pool using a v1 configuration. + "networkConfig": { # Defines the network configuration for the pool. # Network configuration for the pool. + "egressOption": "A String", # Option to configure network egress for the workers. + "peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options) + }, + "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool. + "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + "machineType": "A String", # Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use a sensible default. 
+ }, + }, + "state": "A String", # Output only. `WorkerPool` state. + "uid": "A String", # Output only. A unique identifier for the `WorkerPool`. + "updateTime": "A String", # Output only. Time at which the request to update the `WorkerPool` was received. +} + + validateOnly: boolean, If set, validate the request and preview the response, but do not actually post it. + workerPoolId: string, Required. Immutable. The ID to use for the `WorkerPool`, which will become the final component of the resource name. This value should be 1-63 characters, and valid characters are /a-z-/. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. + "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+
delete(name, allowMissing=None, etag=None, validateOnly=None, x__xgafv=None)
+ Deletes a `WorkerPool`. + +Args: + name: string, Required. The name of the `WorkerPool` to delete. Format: `projects/{project}/locations/{workerPool}/workerPools/{workerPool}`. (required) + allowMissing: boolean, If set to true, and the `WorkerPool` is not found, the request will succeed but no action will be taken on the server. + etag: string, Optional. If this is provided, it must match the server's etag on the workerpool for the request to be processed. + validateOnly: boolean, If set, validate the request and preview the response, but do not actually post it. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. + "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+
get(name, x__xgafv=None)
+ Returns details of a `WorkerPool`. + +Args: + name: string, Required. The name of the `WorkerPool` to retrieve. Format: `projects/{project}/locations/{location}/workerPools/{workerPool}`. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Configuration for a `WorkerPool`. Cloud Build owns and maintains a pool of workers for general use and have no access to a project's private network. By default, builds submitted to Cloud Build will use a worker from this pool. If your build needs access to resources on a private network, create and use a `WorkerPool` to run your builds. Private `WorkerPool`s give your builds access to any single VPC network that you administer, including any on-prem resources connected to that VPC network. For an overview of custom worker pools, see [Custom workers overview](https://cloud.google.com/cloud-build/docs/custom-workers/custom-workers-overview). + "annotations": { # User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + "a_key": "A String", + }, + "createTime": "A String", # Output only. Time at which the request to create the `WorkerPool` was received. + "deleteTime": "A String", # Output only. Time at which the request to delete the `WorkerPool` was received. + "displayName": "A String", # A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters. + "etag": "A String", # Output only. Checksum computed by the server. May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding. + "name": "A String", # Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed. + "privatePoolV1Config": { # Configuration for a V1 `PrivatePool`. # Private Pool using a v1 configuration. + "networkConfig": { # Defines the network configuration for the pool. # Network configuration for the pool. + "egressOption": "A String", # Option to configure network egress for the workers. + "peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options) + }, + "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool. + "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + "machineType": "A String", # Machine type of a worker, such as `e2-medium`. 
See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use a sensible default. + }, + }, + "state": "A String", # Output only. `WorkerPool` state. + "uid": "A String", # Output only. A unique identifier for the `WorkerPool`. + "updateTime": "A String", # Output only. Time at which the request to update the `WorkerPool` was received. +}+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ Lists `WorkerPool`s. + +Args: + parent: string, Required. The parent of the collection of `WorkerPools`. Format: `projects/{project}/locations/location`. (required) + pageSize: integer, The maximum number of `WorkerPool`s to return. The service may return fewer than this value. If omitted, the server will use a sensible default. + pageToken: string, A page token, received from a previous `ListWorkerPools` call. Provide this to retrieve the subsequent page. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response containing existing `WorkerPools`. + "nextPageToken": "A String", # Continuation token used to page through large result sets. Provide this value in a subsequent ListWorkerPoolsRequest to return the next page of results. + "workerPools": [ # `WorkerPools` for the specified project. + { # Configuration for a `WorkerPool`. Cloud Build owns and maintains a pool of workers for general use and have no access to a project's private network. By default, builds submitted to Cloud Build will use a worker from this pool. If your build needs access to resources on a private network, create and use a `WorkerPool` to run your builds. Private `WorkerPool`s give your builds access to any single VPC network that you administer, including any on-prem resources connected to that VPC network. For an overview of custom worker pools, see [Custom workers overview](https://cloud.google.com/cloud-build/docs/custom-workers/custom-workers-overview). + "annotations": { # User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + "a_key": "A String", + }, + "createTime": "A String", # Output only. Time at which the request to create the `WorkerPool` was received. + "deleteTime": "A String", # Output only. Time at which the request to delete the `WorkerPool` was received. + "displayName": "A String", # A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters. + "etag": "A String", # Output only. Checksum computed by the server. May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding. + "name": "A String", # Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed. + "privatePoolV1Config": { # Configuration for a V1 `PrivatePool`. # Private Pool using a v1 configuration. + "networkConfig": { # Defines the network configuration for the pool. # Network configuration for the pool. + "egressOption": "A String", # Option to configure network egress for the workers. + "peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. 
See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options) + }, + "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool. + "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + "machineType": "A String", # Machine type of a worker, such as `e2-medium`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use a sensible default. + }, + }, + "state": "A String", # Output only. `WorkerPool` state. + "uid": "A String", # Output only. A unique identifier for the `WorkerPool`. + "updateTime": "A String", # Output only. Time at which the request to update the `WorkerPool` was received. + }, + ], +}+
list_next(previous_request, previous_response)
+ Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
patch(name, body=None, updateMask=None, validateOnly=None, x__xgafv=None)
+ Updates a `WorkerPool`. + +Args: + name: string, Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed. (required) + body: object, The request body. + The object takes the form of: + +{ # Configuration for a `WorkerPool`. Cloud Build owns and maintains a pool of workers for general use and have no access to a project's private network. By default, builds submitted to Cloud Build will use a worker from this pool. If your build needs access to resources on a private network, create and use a `WorkerPool` to run your builds. Private `WorkerPool`s give your builds access to any single VPC network that you administer, including any on-prem resources connected to that VPC network. For an overview of custom worker pools, see [Custom workers overview](https://cloud.google.com/cloud-build/docs/custom-workers/custom-workers-overview). + "annotations": { # User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + "a_key": "A String", + }, + "createTime": "A String", # Output only. Time at which the request to create the `WorkerPool` was received. + "deleteTime": "A String", # Output only. Time at which the request to delete the `WorkerPool` was received. + "displayName": "A String", # A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters. + "etag": "A String", # Output only. Checksum computed by the server. May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding. + "name": "A String", # Output only. The resource name of the `WorkerPool`, with format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. The value of `{worker_pool}` is provided by `worker_pool_id` in `CreateWorkerPool` request and the value of `{location}` is determined by the endpoint accessed. + "privatePoolV1Config": { # Configuration for a V1 `PrivatePool`. # Private Pool using a v1 configuration. + "networkConfig": { # Defines the network configuration for the pool. # Network configuration for the pool. + "egressOption": "A String", # Option to configure network egress for the workers. + "peeredNetwork": "A String", # Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options) + }, + "workerConfig": { # Defines the configuration to be used for creating workers in the pool. # Machine configuration for the workers in the pool. + "diskSizeGb": "A String", # Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + "machineType": "A String", # Machine type of a worker, such as `e2-medium`. 
See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use a sensible default. + }, + }, + "state": "A String", # Output only. `WorkerPool` state. + "uid": "A String", # Output only. A unique identifier for the `WorkerPool`. + "updateTime": "A String", # Output only. Time at which the request to update the `WorkerPool` was received. +} + + updateMask: string, A mask specifying which fields in `worker_pool` to update. + validateOnly: boolean, If set, validate the request and preview the response, but do not actually post it. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. + "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+
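A sketch of creating a private worker pool with the methods above; the project, network, and pool names are placeholders, and `create` returns a long-running operation that callers would normally poll:

    from googleapiclient import discovery

    service = discovery.build('cloudbuild', 'v1')
    pools = service.projects().locations().workerPools()

    operation = pools.create(
        parent='projects/my-project/locations/us-central1',
        workerPoolId='private-pool',
        body={
            'displayName': 'Private pool',
            'privatePoolV1Config': {
                'networkConfig': {
                    # Project number and VPC name are placeholders.
                    'peeredNetwork': 'projects/123456789/global/networks/my-vpc',
                },
                'workerConfig': {
                    'machineType': 'e2-standard-4',
                    'diskSizeGb': '100',
                },
            },
        },
    ).execute()
    print(operation['name'])  # long-running operation name to poll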
+ add(parent, body=None, x__xgafv=None)
Add DNS peering on the given resource.
+
+ close()
Close httplib2 connections.
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
List DNS peering for a given resource.
+
+ list_next(previous_request, previous_response)
Retrieves the next page of results.
+
+ remove(parent, body=None, x__xgafv=None)
Remove DNS peering on the given resource.
+add(parent, body=None, x__xgafv=None)
+ Add DNS peering on the given resource. + +Args: + parent: string, The resource on which DNS peering will be created. (required) + body: object, The request body. + The object takes the form of: + +{ # Request message to create dns peering. + "dnsPeering": { # DNS peering configuration. These configurations are used to create DNS peering with the customer Cloud DNS. # Dns peering config. + "description": "A String", # Optional. Optional description of the dns zone. + "domain": "A String", # Required. Name of the dns. + "targetNetwork": "A String", # Optional. Optional target network to which dns peering should happen. + "targetProject": "A String", # Optional. Optional target project to which dns peering should happen. + "zone": "A String", # Required. Name of the zone. + }, +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response message for set dns peering method. +}+
close()
+ Close httplib2 connections.+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ List DNS peering for a given resource. + +Args: + parent: string, Required. The resource on which dns peering will be listed. (required) + pageSize: integer, The maximum number of items to return. + pageToken: string, The next_page_token value to use if there are additional results to retrieve for this list request. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # List dns peering response. + "dnsPeerings": [ # List of dns peering configs. + { # DNS peering configuration. These configurations are used to create DNS peering with the customer Cloud DNS. + "description": "A String", # Optional. Optional description of the dns zone. + "domain": "A String", # Required. Name of the dns. + "targetNetwork": "A String", # Optional. Optional target network to which dns peering should happen. + "targetProject": "A String", # Optional. Optional target project to which dns peering should happen. + "zone": "A String", # Required. Name of the zone. + }, + ], + "nextPageToken": "A String", # Token to retrieve the next page of results or empty if there are no more results in the list. +}+
list_next(previous_request, previous_response)
+ Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
remove(parent, body=None, x__xgafv=None)
+ Remove DNS peering on the given resource. + +Args: + parent: string, The resource on which DNS peering will be removed. (required) + body: object, The request body. + The object takes the form of: + +{ # Request message to remove dns peering. + "zone": "A String", # Required. The zone to be removed. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response message for set dns peering method. +}+
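`remove` only takes the zone to drop; a short sketch under the same assumptions (placeholder parent and zone):

```python
from googleapiclient.discovery import build

datafusion = build('datafusion', 'v1beta1')  # ADC assumed
parent = 'projects/my-project/locations/us-central1/instances/my-instance'  # hypothetical

datafusion.projects().locations().instances().dnsPeerings().remove(
    parent=parent,
    body={'zone': 'example-private-zone'},  # Required. The zone to be removed (placeholder).
).execute()
```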
+ dnsPeerings()
+
Returns the dnsPeerings Resource.
+ @@ -147,6 +152,9 @@
getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)
Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
+
+ list(parent, pageSize=None, pageToken=None, view=None, x__xgafv=None)
List namespaces in a given instance
+
+ list_next(previous_request, previous_response)
Retrieves the next page of results.
setIamPolicy(resource, body=None, x__xgafv=None)
Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
@@ -140,6 +146,93 @@list(parent, pageSize=None, pageToken=None, view=None, x__xgafv=None)
+ List namespaces in a given instance + +Args: + parent: string, Required. The instance to list its namespaces. (required) + pageSize: integer, The maximum number of items to return. + pageToken: string, The next_page_token value to use if there are additional results to retrieve for this list request. + view: string, By default, only basic information about a namespace is returned (e.g. name). When `NAMESPACE_VIEW_FULL` is specified, additional information associated with a namespace gets returned (e.g. IAM policy set on the namespace) + Allowed values + NAMESPACE_VIEW_UNSPECIFIED - Default/unset value, which will use BASIC view. + NAMESPACE_VIEW_BASIC - Show the most basic metadata of a namespace + NAMESPACE_VIEW_FULL - Returns all metadata of a namespace + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # List namespaces response. + "namespaces": [ # List of namespaces + { # Represents the information of a namespace + "iamPolicy": { # IAMPolicy encapsulates the IAM policy name, definition and status of policy fetching. # IAM policy associated with this namespace. + "policy": { # An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). # Policy definition if IAM policy fetching is successful, otherwise empty. + "auditConfigs": [ # Specifies cloud audit logging configuration for this policy. + { # Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. 
An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging. + "auditLogConfigs": [ # The configuration for logging of each type of permission. + { # Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging. + "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members. + "A String", + ], + "logType": "A String", # The log type that this config enables. + }, + ], + "service": "A String", # Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services. + }, + ], + "bindings": [ # Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member. + { # Associates `members` with a `role`. + "condition": { # Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. # The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + "description": "A String", # Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + "expression": "A String", # Textual representation of an expression in Common Expression Language syntax. + "location": "A String", # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + "title": "A String", # Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + }, + "members": [ # Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. + "A String", + ], + "role": "A String", # Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + }, + ], + "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. 
If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. + "version": 42, # Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + }, + "status": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Status of iam policy fetching. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + }, + "name": "A String", # Name of the given namespace. + }, + ], + "nextPageToken": "A String", # Token to retrieve the next page of results or empty if there are no more results in the list. +}+
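A hedged sketch of the new namespaces `list` call, requesting the full view so the IAM policy described above is returned with each namespace. It assumes the collection is exposed as `projects().locations().instances().namespaces()` and uses a placeholder instance path:

```python
from googleapiclient.discovery import build

datafusion = build('datafusion', 'v1beta1')  # ADC assumed
parent = 'projects/my-project/locations/us-central1/instances/my-instance'  # hypothetical

namespaces = datafusion.projects().locations().instances().namespaces()
response = namespaces.list(
    parent=parent,
    view='NAMESPACE_VIEW_FULL',  # default BASIC view omits the IAM policy
    pageSize=100,
).execute()

for ns in response.get('namespaces', []):
    policy = ns.get('iamPolicy', {}).get('policy', {})
    bindings = policy.get('bindings', [])
    print(ns['name'], f'{len(bindings)} binding(s)')
```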
list_next(previous_request, previous_response)
+ Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
setIamPolicy(resource, body=None, x__xgafv=None)
Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. diff --git a/docs/dyn/dataproc_v1.projects.regions.clusters.html b/docs/dyn/dataproc_v1.projects.regions.clusters.html index 82bb30fe735..99d21e306ea 100644 --- a/docs/dyn/dataproc_v1.projects.regions.clusters.html +++ b/docs/dyn/dataproc_v1.projects.regions.clusters.html @@ -104,6 +104,9 @@Instance Methods
Updates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). The cluster must be in a RUNNING state or an error is returned.
++
+repair(projectId, region, clusterName, body=None, x__xgafv=None)
Repairs a cluster.
setIamPolicy(resource, body=None, x__xgafv=None)
Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.

@@ -370,7 +373,7 @@Method Details
], } - requestId: string, Optional. A unique id used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. + requestId: string, Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -409,7 +412,7 @@Method Details
region: string, Required. The Dataproc region in which to handle the request. (required) clusterName: string, Required. The cluster name. (required) clusterUuid: string, Optional. Specifying the cluster_uuid means the RPC should fail (with error NOT_FOUND) if cluster with specified UUID does not exist. - requestId: string, Optional. A unique id used to identify the request. If the server receives two DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. + requestId: string, Optional. A unique ID used to identify the request. If the server receives two DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -1351,7 +1354,7 @@Method Details
} gracefulDecommissionTimeout: string, Optional. Timeout for graceful YARN decomissioning. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher. - requestId: string, Optional. A unique id used to identify the request. If the server receives two UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. + requestId: string, Optional. A unique ID used to identify the request. If the server receives two UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. updateMask: string, Required. Specifies the path, relative to Cluster, of the field to update. For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: { "config":{ "workerConfig":{ "numInstances":"5" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: { "config":{ "secondaryWorkerConfig":{ "numInstances":"5" } } } *Note:* Currently, only the following fields can be updated: *Mask* *Purpose* *labels* Update labels *config.worker_config.num_instances* Resize primary worker group *config.secondary_worker_config.num_instances* Resize secondary worker group config.autoscaling_config.policy_uri Use, stop using, or change autoscaling policies x__xgafv: string, V1 error format. Allowed values @@ -1382,6 +1385,51 @@Method Details
}
repair(projectId, region, clusterName, body=None, x__xgafv=None)
+ Repairs a cluster. + +Args: + projectId: string, Required. The ID of the Google Cloud Platform project the cluster belongs to. (required) + region: string, Required. The Dataproc region in which to handle the request. (required) + clusterName: string, Required. The cluster name. (required) + body: object, The request body. + The object takes the form of: + +{ # A request to repair a cluster. + "clusterUuid": "A String", # Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist. + "requestId": "A String", # Optional. A unique ID used to identify the request. If the server receives two RepairClusterRequests with the same ID, the second request is ignored, and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. 
For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+
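The new `repair` method follows the same request-ID pattern as start and stop. A minimal sketch, assuming Application Default Credentials, placeholder project/region/cluster values, and a client-generated UUID for `requestId` as the docs recommend:

```python
import uuid

from googleapiclient.discovery import build

dataproc = build('dataproc', 'v1')  # ADC assumed

operation = dataproc.projects().regions().clusters().repair(
    projectId='my-project',     # hypothetical
    region='us-central1',       # hypothetical
    clusterName='my-cluster',   # hypothetical
    body={
        # A UUID makes retries of the same logical request idempotent.
        'requestId': str(uuid.uuid4()),
    },
).execute()

# The call returns a long-running Operation.
print(operation['name'])
```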
setIamPolicy(resource, body=None, x__xgafv=None)
Sets the access control policy on the specified resource. Replaces any existing policy. Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors. @@ -1453,7 +1501,7 @@Method Details
{ # A request to start a cluster. "clusterUuid": "A String", # Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist. - "requestId": "A String", # Optional. A unique id used to identify the request. If the server receives two StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. + "requestId": "A String", # Optional. A unique ID used to identify the request. If the server receives two StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. } x__xgafv: string, V1 error format. @@ -1498,7 +1546,7 @@Method Details
{ # A request to stop a cluster. "clusterUuid": "A String", # Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist. - "requestId": "A String", # Optional. A unique id used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. + "requestId": "A String", # Optional. A unique ID used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. } x__xgafv: string, V1 error format. diff --git a/docs/dyn/datastore_v1.projects.html b/docs/dyn/datastore_v1.projects.html index 96acf9cb890..4f71a3fd441 100644 --- a/docs/dyn/datastore_v1.projects.html +++ b/docs/dyn/datastore_v1.projects.html @@ -248,40 +248,7 @@Method Details
], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "update": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to update. The entity must already exist. Must have a complete key path. @@ -299,40 +266,7 @@Method Details
], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "upsert": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # The entity to upsert. The entity may or may not already exist. The entity key's final path element may be incomplete. @@ -350,40 +284,7 @@Method Details
], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, }, @@ -602,40 +503,7 @@Method Details
], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -659,40 +527,7 @@Method Details
], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -790,7 +625,24 @@Method Details
"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -829,7 +681,24 @@Method Details
"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -890,7 +759,24 @@Method Details
"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. @@ -977,40 +863,7 @@Method Details
], }, "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. - "a_key": { # A message that can hold any of the supported value types and associated metadata. - "arrayValue": { # An array value. # An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`. - "values": [ # Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'. - # Object with schema name: Value - ], - }, - "blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. - "booleanValue": True or False, # A boolean value. - "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. - "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. - "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. - "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. - "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. - }, - "integerValue": "A String", # An integer value. - "keyValue": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # A key value. - "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. - "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. - "projectId": "A String", # The ID of the project to which the entities belong. - }, - "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. 
The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. - { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. - "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. - "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. - }, - ], - }, - "meaning": 42, # The `meaning` field should only be populated for backwards compatibility. - "nullValue": "A String", # A null value. - "stringValue": "A String", # A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes. - "timestampValue": "A String", # A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down. - }, + "a_key": # Object with schema name: Value }, }, "version": "A String", # The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads. @@ -1049,7 +902,24 @@Method Details
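On the read side, the collapsed `# Object with schema name: Value` references in these hunks simply mean the nested map recurses into the same `Value` shape. A hedged sketch of walking a `projects.lookup` response under that assumption (the key below is hypothetical):

```python
from googleapiclient.discovery import build

datastore = build("datastore", "v1")

body = {
    "keys": [
        {
            "partitionId": {"projectId": "my-project"},          # hypothetical project
            "path": [{"kind": "Task", "name": "sample-task"}],   # hypothetical key
        }
    ]
}
resp = datastore.projects().lookup(projectId="my-project", body=body).execute()

for result in resp.get("found", []):
    entity = result["entity"]
    print("version:", result.get("version"))
    for name, value in entity.get("properties", {}).items():
        # Each value is a Value message; nested entities recurse into the same shape.
        if "entityValue" in value:
            nested = value["entityValue"].get("properties", {})
            print(name, "->", {k: v.get("stringValue") for k, v in nested.items()})
        else:
            print(name, "->", value)
```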
"blobValue": "A String", # A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded. "booleanValue": True or False, # A boolean value. "doubleValue": 3.14, # A double value. - "entityValue": # Object with schema name: Entity # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "entityValue": { # A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message. # An entity value. - May have no key. - May have a key with an incomplete key path. - May have a reserved/read-only key. + "key": { # A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts. # The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key. + "partitionId": { # A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state. # Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition. + "namespaceId": "A String", # If not empty, the ID of the namespace to which the entities belong. + "projectId": "A String", # The ID of the project to which the entities belong. + }, + "path": [ # The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements. + { # A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete. + "id": "A String", # The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future. + "kind": "A String", # The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. 
A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + "name": "A String", # The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`. + }, + ], + }, + "properties": { # The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`. + "a_key": # Object with schema name: Value + }, + }, "excludeFromIndexes": True or False, # If the value should be excluded from all indexes including those defined explicitly. "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. diff --git a/docs/dyn/dialogflow_v2.projects.agent.environments.html b/docs/dyn/dialogflow_v2.projects.agent.environments.html index d6512d6487d..47de8aefb06 100644 --- a/docs/dyn/dialogflow_v2.projects.agent.environments.html +++ b/docs/dyn/dialogflow_v2.projects.agent.environments.html @@ -127,7 +127,7 @@Method Details
The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -182,7 +182,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -261,7 +261,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -369,7 +369,7 @@Method Details
{ # The response message for Environments.ListEnvironments. "environments": [ # The list of agent environments. There will be a maximum number of items returned based on the page_size field in the request. { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -442,7 +442,7 @@Method Details
The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -498,7 +498,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. diff --git a/docs/dyn/dialogflow_v2.projects.locations.agent.environments.html b/docs/dyn/dialogflow_v2.projects.locations.agent.environments.html index 16a24cae9cf..453186bb358 100644 --- a/docs/dyn/dialogflow_v2.projects.locations.agent.environments.html +++ b/docs/dyn/dialogflow_v2.projects.locations.agent.environments.html @@ -127,7 +127,7 @@Method Details
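The remaining hunks in this patch all record the same Dialogflow change: `agentVersion` on an agent `Environment` is now documented as Optional rather than Required, in both v2 and v2beta1 and in the regional variants. A hedged sketch of what that permits with the dynamic client, assuming a made-up project and environment ID (omitting `agentVersion` is shown only to illustrate the relaxed requirement, not as a recommended workflow):

```python
from googleapiclient.discovery import build

dialogflow = build("dialogflow", "v2")

# Create an environment without pinning an agent version up front; per the updated
# docs, agentVersion is Optional, so it could be attached later with a patch call.
environment = {
    "description": "Staging environment created without an explicit agent version.",
}

created = (
    dialogflow.projects()
    .agent()
    .environments()
    .create(
        parent="projects/my-project/agent",  # hypothetical project
        environmentId="staging",             # hypothetical environment ID
        body=environment,
    )
    .execute()
)
print(created.get("name"), created.get("state"))
```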
The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -182,7 +182,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -261,7 +261,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -369,7 +369,7 @@Method Details
{ # The response message for Environments.ListEnvironments. "environments": [ # The list of agent environments. There will be a maximum number of items returned based on the page_size field in the request. { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -442,7 +442,7 @@Method Details
The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -498,7 +498,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # Optional. The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. diff --git a/docs/dyn/dialogflow_v2beta1.projects.agent.environments.html b/docs/dyn/dialogflow_v2beta1.projects.agent.environments.html index 646f5f3d190..1eac35f948f 100644 --- a/docs/dyn/dialogflow_v2beta1.projects.agent.environments.html +++ b/docs/dyn/dialogflow_v2beta1.projects.agent.environments.html @@ -127,7 +127,7 @@Method Details
The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -182,7 +182,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -261,7 +261,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -369,7 +369,7 @@Method Details
{ # The response message for Environments.ListEnvironments. "environments": [ # The list of agent environments. There will be a maximum number of items returned based on the page_size field in the request. { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -442,7 +442,7 @@Method Details
The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -498,7 +498,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. diff --git a/docs/dyn/dialogflow_v2beta1.projects.locations.agent.environments.html b/docs/dyn/dialogflow_v2beta1.projects.locations.agent.environments.html index 31bd1db2f69..f505c756db6 100644 --- a/docs/dyn/dialogflow_v2beta1.projects.locations.agent.environments.html +++ b/docs/dyn/dialogflow_v2beta1.projects.locations.agent.environments.html @@ -127,7 +127,7 @@Method Details
The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -182,7 +182,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -261,7 +261,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -369,7 +369,7 @@Method Details
{ # The response message for Environments.ListEnvironments. "environments": [ # The list of agent environments. There will be a maximum number of items returned based on the page_size field in the request. { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -442,7 +442,7 @@Method Details
The object takes the form of: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. @@ -498,7 +498,7 @@Method Details
An object of the form: { # You can create multiple versions of your agent and publish them to separate environments. When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for: - testing - development - production - etc. For more information, see the [versions and environments guide](https://cloud.google.com/dialogflow/docs/agents-versions). - "agentVersion": "A String", # Required. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` + "agentVersion": "A String", # Optional. The agent version loaded into this environment. Supported formats: - `projects//agent/versions/` - `projects//locations//agent/versions/` "description": "A String", # Optional. The developer-provided description for this environment. The maximum length is 500 characters. If exceeded, the request is rejected. "fulfillment": { # By default, your agent responds to a matched intent with a static response. As an alternative, you can provide a more dynamic response by using fulfillment. When you enable fulfillment for an intent, Dialogflow responds to that intent by calling a service that you define. For example, if an end-user wants to schedule a haircut on Friday, your service can check your database and respond to the end-user with availability information for Friday. For more information, see the [fulfillment guide](https://cloud.google.com/dialogflow/docs/fulfillment-overview). # Optional. The fulfillment settings to use for this environment. "displayName": "A String", # The human-readable name of the fulfillment, unique within the agent. This field is not used for Fulfillment in an Environment. diff --git a/docs/dyn/drive_v2.drives.html b/docs/dyn/drive_v2.drives.html index 4d66097b526..ed99df2ff87 100644 --- a/docs/dyn/drive_v2.drives.html +++ b/docs/dyn/drive_v2.drives.html @@ -330,7 +330,7 @@Method Details
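Note on the Dialogflow environment hunks above: `agentVersion` is relaxed from Required to Optional. A minimal, hedged sketch of how that might look with the generated Python client follows; the project ID is a placeholder and credentials are assumed to come from Application Default Credentials, neither of which appears in this diff.

```python
from googleapiclient.discovery import build

# Placeholder project; auth is assumed via Application Default Credentials.
dialogflow = build("dialogflow", "v2")
parent = "projects/my-project/agent"

# List the agent's environments. Results are paged by pageSize/pageToken.
response = (
    dialogflow.projects()
    .agent()
    .environments()
    .list(parent=parent, pageSize=20)
    .execute()
)
for env in response.get("environments", []):
    # agentVersion is now documented as Optional, so it may be absent.
    print(env["name"], env.get("agentVersion", "<no version pinned>"))
```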
Lists the user's shared drives. Args: - maxResults: integer, Maximum number of shared drives to return. + maxResults: integer, Maximum number of shared drives to return per page. pageToken: string, Page token for shared drives. q: string, Query string for searching shared drives. useDomainAdminAccess: boolean, Issue the request as a domain administrator; if set to true, then all shared drives of the domain in which the requester is an administrator are returned. diff --git a/docs/dyn/drive_v3.drives.html b/docs/dyn/drive_v3.drives.html index 73825b853e3..2e72bf1e512 100644 --- a/docs/dyn/drive_v3.drives.html +++ b/docs/dyn/drive_v3.drives.html @@ -330,7 +330,7 @@Method Details
Lists the user's shared drives. Args: - pageSize: integer, Maximum number of shared drives to return. + pageSize: integer, Maximum number of shared drives to return per page. pageToken: string, Page token for shared drives. q: string, Query string for searching shared drives. useDomainAdminAccess: boolean, Issue the request as a domain administrator; if set to true, then all shared drives of the domain in which the requester is an administrator are returned. diff --git a/docs/dyn/firebaseappcheck_v1beta.projects.apps.deviceCheckConfig.html b/docs/dyn/firebaseappcheck_v1beta.projects.apps.deviceCheckConfig.html index 2523bb76e9f..667c6e9ac98 100644 --- a/docs/dyn/firebaseappcheck_v1beta.projects.apps.deviceCheckConfig.html +++ b/docs/dyn/firebaseappcheck_v1beta.projects.apps.deviceCheckConfig.html @@ -104,7 +104,7 @@Method Details
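The Drive hunks above clarify that `maxResults` / `pageSize` cap the shared drives returned per page, not the overall total. An illustrative paging loop with the v3 client (untested sketch; authentication and quota handling omitted):

```python
from googleapiclient.discovery import build

drive = build("drive", "v3")

drives, page_token = [], None
while True:
    # pageSize limits each page; keep following nextPageToken for the rest.
    response = drive.drives().list(pageSize=10, pageToken=page_token).execute()
    drives.extend(response.get("drives", []))
    page_token = response.get("nextPageToken")
    if not page_token:
        break

print(f"Fetched {len(drives)} shared drives in total")
```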
{ # Response message for the BatchGetDeviceCheckConfigs method. "configs": [ # DeviceCheckConfigs retrieved. - { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. + { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. It also controls certain properties of the returned App Check token, such as its ttl. Note that the Team ID registered with your app is used as part of the validation process. Please register it via the Firebase Console or programmatically via the [Firebase Management Service](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects.iosApps/patch). "keyId": "A String", # Required. The key identifier of a private key enabled with DeviceCheck, created in your Apple Developer account. "name": "A String", # Required. The relative resource name of the DeviceCheck configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/deviceCheckConfig ``` "privateKey": "A String", # Required. Input only. The contents of the private key (`.p8`) file associated with the key specified by `key_id`. For security reasons, this field will never be populated in any response. @@ -133,7 +133,7 @@Method Details
Returns: An object of the form: - { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. + { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. It also controls certain properties of the returned App Check token, such as its ttl. Note that the Team ID registered with your app is used as part of the validation process. Please register it via the Firebase Console or programmatically via the [Firebase Management Service](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects.iosApps/patch). "keyId": "A String", # Required. The key identifier of a private key enabled with DeviceCheck, created in your Apple Developer account. "name": "A String", # Required. The relative resource name of the DeviceCheck configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/deviceCheckConfig ``` "privateKey": "A String", # Required. Input only. The contents of the private key (`.p8`) file associated with the key specified by `key_id`. For security reasons, this field will never be populated in any response. @@ -150,7 +150,7 @@Method Details
body: object, The request body. The object takes the form of: -{ # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. +{ # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. It also controls certain properties of the returned App Check token, such as its ttl. Note that the Team ID registered with your app is used as part of the validation process. Please register it via the Firebase Console or programmatically via the [Firebase Management Service](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects.iosApps/patch). "keyId": "A String", # Required. The key identifier of a private key enabled with DeviceCheck, created in your Apple Developer account. "name": "A String", # Required. The relative resource name of the DeviceCheck configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/deviceCheckConfig ``` "privateKey": "A String", # Required. Input only. The contents of the private key (`.p8`) file associated with the key specified by `key_id`. For security reasons, this field will never be populated in any response. @@ -166,7 +166,7 @@Method Details
Returns: An object of the form: - { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. + { # An app's DeviceCheck configuration object. This configuration is used by ExchangeDeviceCheckToken to validate device tokens issued to apps by DeviceCheck. It also controls certain properties of the returned App Check token, such as its ttl. Note that the Team ID registered with your app is used as part of the validation process. Please register it via the Firebase Console or programmatically via the [Firebase Management Service](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects.iosApps/patch). "keyId": "A String", # Required. The key identifier of a private key enabled with DeviceCheck, created in your Apple Developer account. "name": "A String", # Required. The relative resource name of the DeviceCheck configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/deviceCheckConfig ``` "privateKey": "A String", # Required. Input only. The contents of the private key (`.p8`) file associated with the key specified by `key_id`. For security reasons, this field will never be populated in any response. diff --git a/docs/dyn/firebaseappcheck_v1beta.projects.apps.html b/docs/dyn/firebaseappcheck_v1beta.projects.apps.html index bbbdbcbe312..1ebfac46a22 100644 --- a/docs/dyn/firebaseappcheck_v1beta.projects.apps.html +++ b/docs/dyn/firebaseappcheck_v1beta.projects.apps.html @@ -174,7 +174,7 @@Method Details
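For the App Check `deviceCheckConfig` changes above, a hedged example of reading the config with the v1beta client; the project number and app ID are placeholders, and the token-TTL behaviour mentioned in the updated description is only referenced, not exercised, here.

```python
from googleapiclient.discovery import build

appcheck = build("firebaseappcheck", "v1beta")
# Placeholder project number and iOS app ID.
name = "projects/1234567890/apps/1:1234567890:ios:abc123/deviceCheckConfig"

config = (
    appcheck.projects().apps().deviceCheckConfig().get(name=name).execute()
)
# keyId identifies the DeviceCheck key; privateKey is never returned.
print(config["name"], config["keyId"])
```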
Returns: An object of the form: - { # Response message for ExchangeAppAttestAttestation + { # Response message for ExchangeAppAttestAttestation and ExchangeAppAttestDebugAttestation "artifact": "A String", # An artifact that should be passed back during the Assertion flow. "attestationToken": { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. # An attestation token which can be used to access Firebase APIs. "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. diff --git a/docs/dyn/firebaseappcheck_v1beta.projects.apps.recaptchaConfig.html b/docs/dyn/firebaseappcheck_v1beta.projects.apps.recaptchaConfig.html index 3b52ad55a11..b7f1a05c354 100644 --- a/docs/dyn/firebaseappcheck_v1beta.projects.apps.recaptchaConfig.html +++ b/docs/dyn/firebaseappcheck_v1beta.projects.apps.recaptchaConfig.html @@ -104,7 +104,7 @@Method Details
{ # Response message for the BatchGetRecaptchaConfigs method. "configs": [ # RecaptchaConfigs retrieved. - { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. + { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. It also controls certain properties of the returned App Check token, such as its ttl. "name": "A String", # Required. The relative resource name of the reCAPTCHA v3 configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/recaptchaConfig ``` "siteSecret": "A String", # Required. Input only. The site secret used to identify your service for reCAPTCHA v3 verification. For security reasons, this field will never be populated in any response. "siteSecretSet": True or False, # Output only. Whether the `site_secret` field was previously set. Since we will never return the `site_secret` field, this field is the only way to find out whether it was previously set. @@ -132,7 +132,7 @@Method Details
Returns: An object of the form: - { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. + { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. It also controls certain properties of the returned App Check token, such as its ttl. "name": "A String", # Required. The relative resource name of the reCAPTCHA v3 configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/recaptchaConfig ``` "siteSecret": "A String", # Required. Input only. The site secret used to identify your service for reCAPTCHA v3 verification. For security reasons, this field will never be populated in any response. "siteSecretSet": True or False, # Output only. Whether the `site_secret` field was previously set. Since we will never return the `site_secret` field, this field is the only way to find out whether it was previously set. @@ -148,7 +148,7 @@Method Details
body: object, The request body. The object takes the form of: -{ # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. +{ # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. It also controls certain properties of the returned App Check token, such as its ttl. "name": "A String", # Required. The relative resource name of the reCAPTCHA v3 configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/recaptchaConfig ``` "siteSecret": "A String", # Required. Input only. The site secret used to identify your service for reCAPTCHA v3 verification. For security reasons, this field will never be populated in any response. "siteSecretSet": True or False, # Output only. Whether the `site_secret` field was previously set. Since we will never return the `site_secret` field, this field is the only way to find out whether it was previously set. @@ -163,7 +163,7 @@Method Details
Returns: An object of the form: - { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. + { # An app's reCAPTCHA v3 configuration object. This configuration is used by ExchangeRecaptchaToken to validate reCAPTCHA tokens issued to apps by reCAPTCHA v3. It also controls certain properties of the returned App Check token, such as its ttl. "name": "A String", # Required. The relative resource name of the reCAPTCHA v3 configuration object, in the format: ``` projects/{project_number}/apps/{app_id}/recaptchaConfig ``` "siteSecret": "A String", # Required. Input only. The site secret used to identify your service for reCAPTCHA v3 verification. For security reasons, this field will never be populated in any response. "siteSecretSet": True or False, # Output only. Whether the `site_secret` field was previously set. Since we will never return the `site_secret` field, this field is the only way to find out whether it was previously set. diff --git a/docs/dyn/firestore_v1beta1.projects.databases.documents.html b/docs/dyn/firestore_v1beta1.projects.databases.documents.html index 352fb5224ab..712832b8a03 100644 --- a/docs/dyn/firestore_v1beta1.projects.databases.documents.html +++ b/docs/dyn/firestore_v1beta1.projects.databases.documents.html @@ -175,11 +175,7 @@Method Details
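Similarly, for the `recaptchaConfig` hunks, a sketch of the batch retrieval described by BatchGetRecaptchaConfigs; the project number and app IDs are placeholders, and the `parent`/`names` parameter names are taken from the generated reference, not verified against a live project.

```python
from googleapiclient.discovery import build

appcheck = build("firebaseappcheck", "v1beta")
parent = "projects/1234567890"  # placeholder project number
names = [
    f"{parent}/apps/1:1234567890:web:aaa/recaptchaConfig",
    f"{parent}/apps/1:1234567890:web:bbb/recaptchaConfig",
]

response = (
    appcheck.projects()
    .apps()
    .recaptchaConfig()
    .batchGet(parent=parent, names=names)
    .execute()
)
for cfg in response.get("configs", []):
    # siteSecret is input only; siteSecretSet is the only way to check it.
    print(cfg["name"], "site secret set:", cfg.get("siteSecretSet", False))
```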
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -234,16 +230,31 @@Method Details
{ # A transformation of a field of the document. "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference. "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. 
- "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -263,11 +274,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -287,11 +294,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -312,7 +315,26 @@Method Details
}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -323,11 +345,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -359,16 +377,31 @@Method Details
{ # A transformation of a field of the document. "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference. "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. 
- "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -388,11 +421,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -412,11 +441,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -437,7 +462,26 @@Method Details
}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -471,11 +515,7 @@Method Details
{ # The result of applying a write. "transformResults": [ # The results of applying each DocumentTransform.FieldTransform, in the same order. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -563,16 +603,31 @@Method Details
{ # A transformation of a field of the document. "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference. "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. 
- "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -592,11 +647,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -616,11 +667,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -641,7 +688,26 @@Method Details
}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -652,11 +718,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -688,16 +750,31 @@Method Details
{ # A transformation of a field of the document. "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference. "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. 
- "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -717,11 +794,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -741,11 +814,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -766,7 +835,26 @@Method Details
}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -790,11 +878,7 @@Method Details
{ # The result of applying a write. "transformResults": [ # The results of applying each DocumentTransform.FieldTransform, in the same order. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -834,11 +918,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -876,11 +956,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -946,11 +1022,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1003,11 +1075,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1117,11 +1185,7 @@Method Details
"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1169,11 +1233,7 @@Method Details
"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1207,11 +1267,7 @@Method Details
}, "op": "A String", # The operator to filter by. "value": { # A message that can hold any of the supported value types. # The value to compare to. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1264,11 +1320,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1354,11 +1406,7 @@Method Details
"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1406,11 +1454,7 @@Method Details
"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1444,11 +1488,7 @@Method Details
}, "op": "A String", # The operator to filter by. "value": { # A message that can hold any of the supported value types. # The value to compare to. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1493,11 +1533,7 @@Method Details
"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1549,11 +1585,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1593,11 +1625,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1671,11 +1699,7 @@Method Details
"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1723,11 +1747,7 @@Method Details
"before": True or False, # If the position is just before or just after the given values, relative to the sort order defined by the query. "values": [ # The values that represent a position, in the order they appear in the order by clause of a query. Can contain fewer values than specified in the order by clause. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1761,11 +1781,7 @@Method Details
}, "op": "A String", # The operator to filter by. "value": { # A message that can hold any of the supported value types. # The value to compare to. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1809,11 +1825,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1870,16 +1882,31 @@Method Details
{ # A transformation of a field of the document. "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference. "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. 
- "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1899,11 +1926,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1923,11 +1946,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1948,7 +1967,26 @@Method Details
}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -1959,11 +1997,7 @@Method Details
"createTime": "A String", # Output only. The time at which the document was created. This value increases monotonically when a document is deleted then recreated. It can also be compared to values from other documents and the `read_time` of a query. "fields": { # The document's fields. The map keys represent field names. A simple field name contains only characters `a` to `z`, `A` to `Z`, `0` to `9`, or `_`, and must not start with `0` to `9`. For example, `foo_bar_17`. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. Field paths may be used in other contexts to refer to structured fields defined here. For `map_value`, the field path is represented by the simple or quoted field names of the containing fields, delimited by `.`. For example, the structured field `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be represented by the field path `foo.x&y`. Within a field path, a quoted field name starts and ends with `` ` `` and may contain any character. Some characters, including `` ` ``, must be escaped using a `\`. For example, `` `x&y` `` represents `x&y` and `` `bak\`tik` `` represents `` bak`tik ``. "a_key": { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -1995,16 +2029,31 @@Method Details
{ # A transformation of a field of the document. "appendMissingElements": { # An array value. # Append the given elements in order if they are not already present in the current field value. If the field is not an array, or if the field does not yet exist, it is first set to the empty array. Equivalent numbers of different types (e.g. 3L and 3.0) are considered equal when checking if a value is missing. NaN is equal to NaN, and Null is equal to Null. If the input contains multiple equivalent values, only the first will be considered. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "fieldPath": "A String", # The path of the field. See Document.fields for the field path syntax reference. "increment": { # A message that can hold any of the supported value types. # Adds the given value to the field's current value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If either of the given value or the current field value are doubles, both values will be interpreted as doubles. Double arithmetic and representation of double values follow IEEE 754 semantics. If there is positive/negative integer overflow, the field is resolved to the largest magnitude positive/negative integer. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. 
- "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -2024,11 +2073,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "maximum": { # A message that can hold any of the supported value types. # Sets the field to the maximum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the given value. If a maximum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the larger operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and zero input value is always the stored value. The maximum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -2048,11 +2093,7 @@Method Details
"timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. }, "minimum": { # A message that can hold any of the supported value types. # Sets the field to the minimum of its current value and the given value. This must be an integer or a double value. If the field is not an integer or double, or if the field does not yet exist, the transformation will set the field to the input value. If a minimum operation is applied where the field and the input value are of mixed types (that is - one is an integer and one is a double) the field takes on the type of the smaller operand. If the operands are equivalent (e.g. 3 and 3.0), the field does not change. 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and zero input value is always the stored value. The minimum of any numeric value x and NaN is NaN. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. @@ -2073,7 +2114,26 @@Method Details
}, "removeAllFromArray": { # An array value. # Remove all of the given elements from the array in the field. If the field is not an array, or if the field does not yet exist, it is set to the empty array. Equivalent numbers of the different types (e.g. 3L and 3.0) are considered equal when deciding whether an element should be removed. NaN is equal to NaN, and Null is equal to Null. This will remove all equivalent values if there are duplicates. The corresponding transform_result will be the null value. "values": [ # Values in the array. - # Object with schema name: Value + { # A message that can hold any of the supported value types. + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. + "booleanValue": True or False, # A boolean value. + "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. + "doubleValue": 3.14, # A double value. + "geoPointValue": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # A geo point value representing a point on the surface of Earth. + "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. + "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. + }, + "integerValue": "A String", # An integer value. + "mapValue": { # A map value. # A map value. + "fields": { # The map's fields. The map keys represent field names. Field names matching the regular expression `__.*__` are reserved. Reserved field names are forbidden except in certain documented contexts. The map keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be empty. + "a_key": # Object with schema name: Value + }, + }, + "nullValue": "A String", # A null value. + "referenceValue": "A String", # A reference to a document. For example: `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + "stringValue": "A String", # A string value. The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the UTF-8 representation are considered by queries. + "timestampValue": "A String", # A timestamp value. Precise only to microseconds. When stored, any additional precision is rounded down. + }, ], }, "setToServerValue": "A String", # Sets the field to the given server value. @@ -2099,11 +2159,7 @@Method Details
{ # The result of applying a write. "transformResults": [ # The results of applying each DocumentTransform.FieldTransform, in the same order. { # A message that can hold any of the supported value types. - "arrayValue": { # An array value. # An array value. Cannot directly contain another array value, though can contain an map which contains another array. - "values": [ # Values in the array. - # Object with schema name: Value - ], - }, + "arrayValue": # Object with schema name: ArrayValue # An array value. Cannot directly contain another array value, though can contain an map which contains another array. "booleanValue": True or False, # A boolean value. "bytesValue": "A String", # A bytes value. Must not exceed 1 MiB - 89 bytes. Only the first 1,500 bytes are considered by queries. "doubleValue": 3.14, # A double value. diff --git a/docs/dyn/groupssettings_v1.groups.html b/docs/dyn/groupssettings_v1.groups.html index 7dd16d14eff..246b38e4313 100644 --- a/docs/dyn/groupssettings_v1.groups.html +++ b/docs/dyn/groupssettings_v1.groups.html @@ -126,6 +126,7 @@Method Details
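A minimal sketch of how the Firestore `v1beta1` schema documented in the hunks above (the `Value` message, field transforms such as `increment` and `appendMissingElements`, and the per-write `transformResults`) is typically driven from the dynamic client. This is not part of the generated docs: the project, database, and document path are hypothetical placeholders, and only the request/response shapes shown above are assumed.

```python
# Sketch only: a commit that applies field transforms as described in the
# firestore_v1beta1 documents documentation above. Names are placeholders.
from googleapiclient.discovery import build

firestore = build("firestore", "v1beta1")
database = "projects/my-project/databases/(default)"

body = {
    "writes": [
        {
            "transform": {
                "document": f"{database}/documents/counters/homepage",
                "fieldTransforms": [
                    # Integer values travel as strings, per the Value schema above.
                    {"fieldPath": "views", "increment": {"integerValue": "1"}},
                    # appendMissingElements takes an ArrayValue of Value messages
                    # and skips elements that are already present.
                    {
                        "fieldPath": "tags",
                        "appendMissingElements": {
                            "values": [{"stringValue": "featured"}]
                        },
                    },
                ],
            }
        }
    ]
}

response = (
    firestore.projects()
    .databases()
    .documents()
    .commit(database=database, body=body)
    .execute()
)

# Each applied FieldTransform reports its outcome in transformResults,
# in the same order as the transforms were given.
for write_result in response.get("writeResults", []):
    print(write_result.get("transformResults"))
```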
# - true # - false "defaultMessageDenyNotificationText": "A String", # When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true. + "default_sender": "A String", # Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group "description": "A String", # Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters. "email": "A String", # The group's email address. This property can be updated using the Directory API. Note: Only a group owner can change a group's email address. A group manager can't do this. # When you change your group's address using the Directory API or the control panel, you are changing the address your subscribers use to send email and the web address people use to access your group. People can't reach your group by visiting the old address. @@ -402,6 +403,7 @@Method Details
# - true # - false "defaultMessageDenyNotificationText": "A String", # When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true. + "default_sender": "A String", # Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group "description": "A String", # Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters. "email": "A String", # The group's email address. This property can be updated using the Directory API. Note: Only a group owner can change a group's email address. A group manager can't do this. # When you change your group's address using the Directory API or the control panel, you are changing the address your subscribers use to send email and the web address people use to access your group. People can't reach your group by visiting the old address. @@ -672,6 +674,7 @@Method Details
# - true # - false "defaultMessageDenyNotificationText": "A String", # When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true. + "default_sender": "A String", # Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group "description": "A String", # Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters. "email": "A String", # The group's email address. This property can be updated using the Directory API. Note: Only a group owner can change a group's email address. A group manager can't do this. # When you change your group's address using the Directory API or the control panel, you are changing the address your subscribers use to send email and the web address people use to access your group. People can't reach your group by visiting the old address. @@ -948,6 +951,7 @@Method Details
# - true # - false "defaultMessageDenyNotificationText": "A String", # When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true. + "default_sender": "A String", # Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group "description": "A String", # Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters. "email": "A String", # The group's email address. This property can be updated using the Directory API. Note: Only a group owner can change a group's email address. A group manager can't do this. # When you change your group's address using the Directory API or the control panel, you are changing the address your subscribers use to send email and the web address people use to access your group. People can't reach your group by visiting the old address. @@ -1218,6 +1222,7 @@Method Details
# - true # - false "defaultMessageDenyNotificationText": "A String", # When a message is rejected, this is text for the rejection notification sent to the message's author. By default, this property is empty and has no value in the API's response body. The maximum notification text size is 10,000 characters. Note: Requires sendMessageDenyNotification property to be true. + "default_sender": "A String", # Default sender for members who can post messages as the group. Possible values are: - `DEFAULT_SELF`: By default messages will be sent from the user - `GROUP`: By default messages will be sent from the group "description": "A String", # Description of the group. This property value may be an empty string if no group description has been entered. If entered, the maximum group description is no more than 300 characters. "email": "A String", # The group's email address. This property can be updated using the Directory API. Note: Only a group owner can change a group's email address. A group manager can't do this. # When you change your group's address using the Directory API or the control panel, you are changing the address your subscribers use to send email and the web address people use to access your group. People can't reach your group by visiting the old address. diff --git a/docs/dyn/index.md b/docs/dyn/index.md index 8c18b630523..c8a6c4c1616 100644 --- a/docs/dyn/index.md +++ b/docs/dyn/index.md @@ -120,6 +120,7 @@ ## baremetalsolution * [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/baremetalsolution_v1.html) +* [v1alpha1](http://googleapis.github.io/google-api-python-client/docs/dyn/baremetalsolution_v1alpha1.html) ## bigquery @@ -881,6 +882,7 @@ ## sqladmin +* [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/sqladmin_v1.html) * [v1beta4](http://googleapis.github.io/google-api-python-client/docs/dyn/sqladmin_v1beta4.html) @@ -952,6 +954,7 @@ ## verifiedaccess * [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/verifiedaccess_v1.html) +* [v2](http://googleapis.github.io/google-api-python-client/docs/dyn/verifiedaccess_v2.html) ## videointelligence diff --git a/docs/dyn/logging_v2.billingAccounts.html b/docs/dyn/logging_v2.billingAccounts.html index e5a75f7bc7e..0bd568dcb61 100644 --- a/docs/dyn/logging_v2.billingAccounts.html +++ b/docs/dyn/logging_v2.billingAccounts.html @@ -94,6 +94,11 @@Instance Methods
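The Groups Settings hunks above add a `default_sender` property with the values `DEFAULT_SELF` and `GROUP`. A short, hedged sketch of updating it with the dynamic client; the group address is a placeholder, and admin authorization (for example via Application Default Credentials with the apps.groups.settings scope) is assumed to be configured separately.

```python
# Sketch only: setting the new default_sender property documented above.
# The group address is a placeholder; authorization is assumed to be
# configured outside this snippet.
from googleapiclient.discovery import build

service = build("groupssettings", "v1")

settings = service.groups().patch(
    groupUniqueId="discuss@example.com",
    body={"default_sender": "GROUP"},  # or "DEFAULT_SELF", per the docs above
).execute()

print(settings.get("default_sender"))
```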
Returns the logs Resource.
++
+operations()
+Returns the operations Resource.
+ diff --git a/docs/dyn/logging_v2.billingAccounts.locations.html b/docs/dyn/logging_v2.billingAccounts.locations.html index 4e276e6848c..dc52ed4bcb4 100644 --- a/docs/dyn/logging_v2.billingAccounts.locations.html +++ b/docs/dyn/logging_v2.billingAccounts.locations.html @@ -79,6 +79,11 @@Instance Methods
Returns the buckets Resource.
++
+operations()
+Returns the operations Resource.
+Close httplib2 connections.
diff --git a/docs/dyn/logging_v2.billingAccounts.locations.operations.html b/docs/dyn/logging_v2.billingAccounts.locations.operations.html new file mode 100644 index 00000000000..35fbc9fc5fa --- /dev/null +++ b/docs/dyn/logging_v2.billingAccounts.locations.operations.html @@ -0,0 +1,176 @@ + + + +Cloud Logging API . billingAccounts . locations . operations
+Instance Methods
++
+cancel(name, body=None, x__xgafv=None)
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
++
+close()
Close httplib2 connections.
++
+list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
++
+list_next(previous_request, previous_response)
Retrieves the next page of results.
+Method Details
+++ +cancel(name, body=None, x__xgafv=None)
+Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. + +Args: + name: string, The name of the operation resource to be cancelled. (required) + body: object, The request body. + The object takes the form of: + +{ # The request message for Operations.CancelOperation. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. +}+++ +close()
+Close httplib2 connections.+++ +list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id. + +Args: + name: string, The name of the operation's parent resource. (required) + filter: string, The standard list filter. + pageSize: integer, The standard list page size. + pageToken: string, The standard list page token. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # The response message for Operations.ListOperations. + "nextPageToken": "A String", # The standard List next-page token. + "operations": [ # A list of operations that matches the specified filter in the request. + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. 
For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + }, + ], +}+++ + \ No newline at end of file diff --git a/docs/dyn/logging_v2.billingAccounts.operations.html b/docs/dyn/logging_v2.billingAccounts.operations.html new file mode 100644 index 00000000000..310bf78a836 --- /dev/null +++ b/docs/dyn/logging_v2.billingAccounts.operations.html @@ -0,0 +1,124 @@ + + + +list_next(previous_request, previous_response)
+Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++Cloud Logging API . billingAccounts . operations
+Instance Methods
++
+close()
Close httplib2 connections.
+ +Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+Method Details
+++ +close()
+Close httplib2 connections.+++ + \ No newline at end of file diff --git a/docs/dyn/logging_v2.entries.html b/docs/dyn/logging_v2.entries.html index 251d06dd5b8..eca163df2e0 100644 --- a/docs/dyn/logging_v2.entries.html +++ b/docs/dyn/logging_v2.entries.html @@ -77,6 +77,9 @@get(name, x__xgafv=None)
+Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. + +Args: + name: string, The name of the operation resource. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+Instance Methods
Close httplib2 connections.
++
+copy(body=None, x__xgafv=None)
Copies a set of log entries from a logging bucket to a Cloud Storage bucket.
list(body=None, x__xgafv=None)
Lists log entries. Use this method to retrieve log entries that originated from a project/folder/organization/billing account. For ways to export log entries, see Exporting Logs (https://cloud.google.com/logging/docs/export).
@@ -95,6 +98,49 @@Method Details
Close httplib2 connections.
copy(body=None, x__xgafv=None)
+ Copies a set of log entries from a logging bucket to a Cloud Storage bucket. + +Args: + body: object, The request body. + The object takes the form of: + +{ # The parameters to CopyLogEntries. + "destination": "A String", # Required. Destination to which to copy logs. + "filter": "A String", # Optional. A filter specifying which log entries to copy. The filter must be no more than 20k characters. An empty filter matches all log entries. + "name": "A String", # Required. Bucket from which to copy logs. e.g. "projects/my-project/locations/my-location/buckets/my-source-bucket +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+
list(body=None, x__xgafv=None)
Lists log entries. Use this method to retrieve log entries that originated from a project/folder/organization/billing account. For ways to export log entries, see Exporting Logs (https://cloud.google.com/logging/docs/export). diff --git a/docs/dyn/logging_v2.folders.locations.html b/docs/dyn/logging_v2.folders.locations.html index d94bd359d08..a9072c1fe84 100644 --- a/docs/dyn/logging_v2.folders.locations.html +++ b/docs/dyn/logging_v2.folders.locations.html @@ -79,6 +79,11 @@Instance Methods
Returns the buckets Resource.
++
+operations()
+Returns the operations Resource.
+Close httplib2 connections.
diff --git a/docs/dyn/logging_v2.folders.locations.operations.html b/docs/dyn/logging_v2.folders.locations.operations.html new file mode 100644 index 00000000000..6751d73a973 --- /dev/null +++ b/docs/dyn/logging_v2.folders.locations.operations.html @@ -0,0 +1,214 @@ + + + +Cloud Logging API . folders . locations . operations
+Instance Methods
++
+cancel(name, body=None, x__xgafv=None)
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
++
+close()
Close httplib2 connections.
+ +Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
++
+list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
++
+list_next(previous_request, previous_response)
Retrieves the next page of results.
+Method Details
+++ +cancel(name, body=None, x__xgafv=None)
+Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. + +Args: + name: string, The name of the operation resource to be cancelled. (required) + body: object, The request body. + The object takes the form of: + +{ # The request message for Operations.CancelOperation. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. +}+++ +close()
+Close httplib2 connections.+++ +get(name, x__xgafv=None)
+Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. + +Args: + name: string, The name of the operation resource. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+++ +list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id. + +Args: + name: string, The name of the operation's parent resource. (required) + filter: string, The standard list filter. + pageSize: integer, The standard list page size. + pageToken: string, The standard list page token. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # The response message for Operations.ListOperations. + "nextPageToken": "A String", # The standard List next-page token. + "operations": [ # A list of operations that matches the specified filter in the request. + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. 
For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + }, + ], +}+++ + \ No newline at end of file diff --git a/docs/dyn/logging_v2.locations.html b/docs/dyn/logging_v2.locations.html index fbc439eab72..0ac52eef7f0 100644 --- a/docs/dyn/logging_v2.locations.html +++ b/docs/dyn/logging_v2.locations.html @@ -79,6 +79,11 @@list_next(previous_request, previous_response)
+Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++Instance Methods
Returns the buckets Resource.
++
+operations()
+Returns the operations Resource.
+Close httplib2 connections.
diff --git a/docs/dyn/logging_v2.locations.operations.html b/docs/dyn/logging_v2.locations.operations.html new file mode 100644 index 00000000000..e25174e2d97 --- /dev/null +++ b/docs/dyn/logging_v2.locations.operations.html @@ -0,0 +1,214 @@ + + + +Cloud Logging API . locations . operations
+Instance Methods
++
+cancel(name, body=None, x__xgafv=None)
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
++
+close()
Close httplib2 connections.
+ +Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
++
+list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
++
+list_next(previous_request, previous_response)
Retrieves the next page of results.
+Method Details
+++ +cancel(name, body=None, x__xgafv=None)
+Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. + +Args: + name: string, The name of the operation resource to be cancelled. (required) + body: object, The request body. + The object takes the form of: + +{ # The request message for Operations.CancelOperation. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. +}+++ +close()
+Close httplib2 connections.+++ +get(name, x__xgafv=None)
+Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. + +Args: + name: string, The name of the operation resource. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+++ +list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id. + +Args: + name: string, The name of the operation's parent resource. (required) + filter: string, The standard list filter. + pageSize: integer, The standard list page size. + pageToken: string, The standard list page token. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # The response message for Operations.ListOperations. + "nextPageToken": "A String", # The standard List next-page token. + "operations": [ # A list of operations that matches the specified filter in the request. + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. 
For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + }, + ], +}+++ + \ No newline at end of file diff --git a/docs/dyn/logging_v2.organizations.locations.html b/docs/dyn/logging_v2.organizations.locations.html index ced3a5b26fa..f07b2a55453 100644 --- a/docs/dyn/logging_v2.organizations.locations.html +++ b/docs/dyn/logging_v2.organizations.locations.html @@ -79,6 +79,11 @@list_next(previous_request, previous_response)
+Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++Instance Methods
Returns the buckets Resource.
++
+operations()
+Returns the operations Resource.
+Close httplib2 connections.
diff --git a/docs/dyn/logging_v2.organizations.locations.operations.html b/docs/dyn/logging_v2.organizations.locations.operations.html new file mode 100644 index 00000000000..1ffaa2c9328 --- /dev/null +++ b/docs/dyn/logging_v2.organizations.locations.operations.html @@ -0,0 +1,214 @@ + + + +Cloud Logging API . organizations . locations . operations
+Instance Methods
++
+cancel(name, body=None, x__xgafv=None)
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
++
+close()
Close httplib2 connections.
+ +Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
++
+list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
++
+list_next(previous_request, previous_response)
Retrieves the next page of results.
+Method Details
+++ +cancel(name, body=None, x__xgafv=None)
+Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. + +Args: + name: string, The name of the operation resource to be cancelled. (required) + body: object, The request body. + The object takes the form of: + +{ # The request message for Operations.CancelOperation. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. +}+++ +close()
+Close httplib2 connections.+++ +get(name, x__xgafv=None)
+Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. + +Args: + name: string, The name of the operation resource. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+++ +list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id. + +Args: + name: string, The name of the operation's parent resource. (required) + filter: string, The standard list filter. + pageSize: integer, The standard list page size. + pageToken: string, The standard list page token. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # The response message for Operations.ListOperations. + "nextPageToken": "A String", # The standard List next-page token. + "operations": [ # A list of operations that matches the specified filter in the request. + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. 
For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + }, + ], +}+++ + \ No newline at end of file diff --git a/docs/dyn/logging_v2.projects.locations.html b/docs/dyn/logging_v2.projects.locations.html index ee7fbd93a52..37033159ba2 100644 --- a/docs/dyn/logging_v2.projects.locations.html +++ b/docs/dyn/logging_v2.projects.locations.html @@ -79,6 +79,11 @@list_next(previous_request, previous_response)
+Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++Instance Methods
Returns the buckets Resource.
++
+operations()
+Returns the operations Resource.
+Close httplib2 connections.
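The projects.locations.operations resource added in the next file mirrors the organizations variant above. A hedged sketch of polling an operation with get and falling back to cancel follows; the operation name is a placeholder for a name returned by whichever call started the operation, and the polling interval and deadline are illustrative only.

    import time
    from googleapiclient.discovery import build

    logging_service = build("logging", "v2")
    ops = logging_service.projects().locations().operations()

    # Placeholder: a real operation name is returned by the API call that started it.
    op_name = "projects/my-project/locations/global/operations/OPERATION_ID"

    op = ops.get(name=op_name).execute()
    deadline = time.time() + 600          # give up after ten minutes (arbitrary)
    while not op.get("done") and time.time() < deadline:
        time.sleep(10)                    # polling interval is illustrative
        op = ops.get(name=op_name).execute()

    if not op.get("done"):
        # Cancellation is best-effort; on success the operation is not deleted but
        # ends with google.rpc.Status.code 1 (CANCELLED).
        ops.cancel(name=op_name, body={}).execute()
    elif "error" in op:
        print("Operation failed:", op["error"].get("message"))
    else:
        print("Operation succeeded:", op.get("response"))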
diff --git a/docs/dyn/logging_v2.projects.locations.operations.html b/docs/dyn/logging_v2.projects.locations.operations.html new file mode 100644 index 00000000000..05bd20ba0d2 --- /dev/null +++ b/docs/dyn/logging_v2.projects.locations.operations.html @@ -0,0 +1,214 @@ + + + +Cloud Logging API . projects . locations . operations
+Instance Methods
++
+cancel(name, body=None, x__xgafv=None)
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
++
+close()
Close httplib2 connections.
+ +Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
++
+list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
++
+list_next(previous_request, previous_response)
Retrieves the next page of results.
+Method Details
+++ +cancel(name, body=None, x__xgafv=None)
+Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED. + +Args: + name: string, The name of the operation resource to be cancelled. (required) + body: object, The request body. + The object takes the form of: + +{ # The request message for Operations.CancelOperation. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. +}+++ +close()
+Close httplib2 connections.+++ +get(name, x__xgafv=None)
+Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. + +Args: + name: string, The name of the operation resource. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, +}+++ +list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id. + +Args: + name: string, The name of the operation's parent resource. (required) + filter: string, The standard list filter. + pageSize: integer, The standard list page size. + pageToken: string, The standard list page token. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # The response message for Operations.ListOperations. + "nextPageToken": "A String", # The standard List next-page token. + "operations": [ # A list of operations that matches the specified filter in the request. + { # This resource represents a long-running operation that is the result of a network API call. + "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available. + "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}. + "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. 
For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse. + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + }, + ], +}+++ + \ No newline at end of file diff --git a/docs/dyn/monitoring_v1.projects.dashboards.html b/docs/dyn/monitoring_v1.projects.dashboards.html index 7146c53c1fe..5456b9020fe 100644 --- a/docs/dyn/monitoring_v1.projects.dashboards.html +++ b/docs/dyn/monitoring_v1.projects.dashboards.html @@ -117,6 +117,9 @@list_next(previous_request, previous_response)
+Retrieves the next page of results. + +Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + +Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++Method Details
"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -328,6 +331,9 @@Method Details
"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -537,6 +543,9 @@Method Details
{ # A single tile in the mosaic. The placement and size of the tile are configurable. "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1. "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -751,6 +760,9 @@Method Details
"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -974,6 +986,9 @@Method Details
"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -1185,6 +1200,9 @@Method Details
"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -1394,6 +1412,9 @@Method Details
{ # A single tile in the mosaic. The placement and size of the tile are configurable. "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1. "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -1608,6 +1629,9 @@Method Details
"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -1855,6 +1879,9 @@Method Details
"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -2066,6 +2093,9 @@Method Details
"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -2275,6 +2305,9 @@Method Details
{ # A single tile in the mosaic. The placement and size of the tile are configurable. "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1. "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -2489,6 +2522,9 @@Method Details
"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -2722,6 +2758,9 @@Method Details
"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -2933,6 +2972,9 @@Method Details
"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -3142,6 +3184,9 @@Method Details
{ # A single tile in the mosaic. The placement and size of the tile are configurable. "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1. "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -3356,6 +3401,9 @@Method Details
"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -3597,6 +3645,9 @@Method Details
"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -3808,6 +3859,9 @@Method Details
"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -4017,6 +4071,9 @@Method Details
{ # A single tile in the mosaic. The placement and size of the tile are configurable. "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1. "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -4231,6 +4288,9 @@Method Details
"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -4454,6 +4514,9 @@Method Details
"weight": "A String", # The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged vertically in this column. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -4665,6 +4728,9 @@Method Details
"columns": "A String", # The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering. "widgets": [ # The informational elements that are arranged into the columns row-first. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -4874,6 +4940,9 @@Method Details
{ # A single tile in the mosaic. The placement and size of the tile are configurable. "height": 42, # The height of the tile, measured in grid blocks. Tiles must have a minimum height of 1. "widget": { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. # The informational widget contained in the tile. For example an XyChart. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. @@ -5088,6 +5157,9 @@Method Details
"weight": "A String", # The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. If omitted, a value of 1 is used while rendering. "widgets": [ # The display widgets arranged horizontally in this row. { # Widget contains a single dashboard component and configuration of how to present the component in the dashboard. + "alertChart": { # A chart that displays alert policy data. # A chart of alert policy data. + "name": "A String", # Required. The resource name of the alert policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + }, "blank": { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. # A blank space. }, "scorecard": { # A widget showing the latest value of a metric, and how this value relates to one or more thresholds. # A scorecard summarizing time series data. diff --git a/docs/dyn/monitoring_v3.projects.alertPolicies.html b/docs/dyn/monitoring_v3.projects.alertPolicies.html index ac128989acd..78696e94800 100644 --- a/docs/dyn/monitoring_v3.projects.alertPolicies.html +++ b/docs/dyn/monitoring_v3.projects.alertPolicies.html @@ -111,6 +111,11 @@Method Details
The object takes the form of: { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/). + "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified. + "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented. + "period": "A String", # Not more than one notification per period. + }, + }, "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED. "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition. { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong. @@ -132,6 +137,12 @@Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. @@ -216,6 +227,11 @@Method Details
An object of the form: { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/). + "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified. + "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented. + "period": "A String", # Not more than one notification per period. + }, + }, "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED. "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition. { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong. @@ -237,6 +253,12 @@Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. @@ -346,6 +368,11 @@Method Details
An object of the form: { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/). + "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified. + "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented. + "period": "A String", # Not more than one notification per period. + }, + }, "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED. "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition. { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong. @@ -367,6 +394,12 @@Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. @@ -464,6 +497,11 @@Method Details
{ # The protocol for the ListAlertPolicies response. "alertPolicies": [ # The returned alert policies. { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/). + "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified. + "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented. + "period": "A String", # Not more than one notification per period. + }, + }, "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED. "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition. { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong. @@ -485,6 +523,12 @@Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. @@ -589,6 +633,11 @@Method Details
The object takes the form of: { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/). + "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified. + "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented. + "period": "A String", # Not more than one notification per period. + }, + }, "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED. "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition. { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong. @@ -610,6 +659,12 @@Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. @@ -695,6 +750,11 @@Method Details
An object of the form: { # A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/). + "alertStrategy": { # Control over how the notification channels in notification_channels are notified when this alert fires. # Control over how this alert policy's notification channels are notified. + "notificationRateLimit": { # Control over the rate of notifications sent to this alert policy's notification channels. # Required for alert policies with a LogMatch condition.Providing this for alert policies that are not log-based is unimplemented. + "period": "A String", # Not more than one notification per period. + }, + }, "combiner": "A String", # How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED. "conditions": [ # A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition. { # A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong. @@ -716,6 +776,12 @@Method Details
"percent": 3.14, # The percentage of time series that must fail the predicate for the condition to be triggered. }, }, + "conditionMatchedLog": { # A condition type that checks whether a log message from any project monitored by the alert policy’s workspace satisfies the given filter. # A condition that checks for log messages matching given constraints. If set, no other conditions can be present. + "filter": "A String", # Required. A logs-based filter. See Advanced Logs Queries for how this filter should be constructed. + "labelExtractors": { # Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors for syntax and examples. + "a_key": "A String", + }, + }, "conditionMonitoringQueryLanguage": { # A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql). # A condition that uses the Monitoring Query Language to define alerts. "duration": "A String", # The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly. "query": "A String", # Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream. diff --git a/docs/dyn/monitoring_v3.services.html b/docs/dyn/monitoring_v3.services.html index 0a1109a7cff..5f40b23183f 100644 --- a/docs/dyn/monitoring_v3.services.html +++ b/docs/dyn/monitoring_v3.services.html @@ -145,6 +145,9 @@Method Details
"telemetry": { # Configuration for how to query telemetry on a Service. # Configuration for how to query telemetry on a Service. "resourceName": "A String", # The full name of the resource that defines this service. Formatted as described in https://cloud.google.com/apis/design/resource_names. }, + "userLabels": { # Labels which have been used to annotate the service. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value. + "a_key": "A String", + }, } serviceId: string, Optional. The Service id to use for this Service. If omitted, an id will be generated instead. Must match the pattern [a-z0-9\-]+ @@ -186,6 +189,9 @@Method Details
"telemetry": { # Configuration for how to query telemetry on a Service. # Configuration for how to query telemetry on a Service. "resourceName": "A String", # The full name of the resource that defines this service. Formatted as described in https://cloud.google.com/apis/design/resource_names. }, + "userLabels": { # Labels which have been used to annotate the service. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value. + "a_key": "A String", + }, }
cancel(name, body=None, x__xgafv=None)
-Used by partners to cancel a subscription service by the end of the current billing cycle for their customers. It should be called directly by the partner using service accounts.
+Used by partners to cancel a subscription service either immediately or by the end of the current billing cycle for their customers. It should be called directly by the partner using service accounts.
Close httplib2 connections.
@@ -101,7 +101,7 @@cancel(name, body=None, x__xgafv=None)
- Used by partners to cancel a subscription service by the end of the current billing cycle for their customers. It should be called directly by the partner using service accounts. +Used by partners to cancel a subscription service either immediately or by the end of the current billing cycle for their customers. It should be called directly by the partner using service accounts. Args: name: string, Required. The name of the subscription resource to be cancelled. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" (required) @@ -123,7 +123,7 @@Method Details
{ "subscription": { # A Subscription resource managed by 3P Partners. # The cancelled subscription resource. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -132,6 +132,7 @@Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -143,7 +144,7 @@Method Details
"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -168,7 +169,7 @@Method Details
The object takes the form of: { # A Subscription resource managed by 3P Partners. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -177,6 +178,7 @@Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -188,7 +190,7 @@Method Details
"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -206,7 +208,7 @@Method Details
An object of the form: { # A Subscription resource managed by 3P Partners. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -215,6 +217,7 @@Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -226,7 +229,7 @@Method Details
"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -257,7 +260,7 @@Method Details
{ "subscription": { # A Subscription resource managed by 3P Partners. # The subscription that has user linked to it. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -266,6 +269,7 @@Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -277,7 +281,7 @@Method Details
"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -336,7 +340,7 @@Method Details
An object of the form: { # A Subscription resource managed by 3P Partners. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -345,6 +349,7 @@Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -356,7 +361,7 @@Method Details
"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -375,7 +380,7 @@Method Details
The object takes the form of: { # A Subscription resource managed by 3P Partners. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -384,6 +389,7 @@Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -395,7 +401,7 @@Method Details
"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -413,7 +419,7 @@Method Details
An object of the form: { # A Subscription resource managed by 3P Partners. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -422,6 +428,7 @@Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -433,7 +440,7 @@Method Details
"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. @@ -464,7 +471,7 @@Method Details
{ # Response that contains the updated subscription resource. "subscription": { # A Subscription resource managed by 3P Partners. # The updated subscription resource. - "cancellationDetails": { # Describes the details of a cancelled subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. + "cancellationDetails": { # Describes the details of a cancelled or cancelling subscription. # Output only. Describes the details of a cancelled subscription. Only applicable to subscription of state `STATE_CANCELLED`. "reason": "A String", # The reason of the cancellation. }, "createTime": "A String", # Output only. System generated timestamp when the subscription is created. UTC timezone. @@ -473,6 +480,7 @@Method Details
"freeTrialEndTime": "A String", # Output only. End of the free trial period, in ISO 8061 format. For example, "2019-08-31T17:28:54.564Z". It will be set the same as createTime if no free trial promotion is specified. "name": "A String", # Output only. Response only. Resource name of the subscription. It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}" "partnerUserToken": "A String", # Required. Identifier of the end-user in partner’s system. The value is restricted to 63 ASCII characters at the maximum. + "processingState": "A String", # Output only. Describes the processing state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "products": [ # Required. Resource name that identifies one or more subscription products. The format will be 'partners/{partner_id}/products/{product_id}'. "A String", ], @@ -484,7 +492,7 @@Method Details
"postalCode": "A String", # The postal code this location refers to. Ex. "94043" "regionCode": "A String", # 2-letter ISO region code for current content region. Ex. “US” Please refers to: https://en.wikipedia.org/wiki/ISO_3166-1 }, - "state": "A String", # Output only. Descibes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). + "state": "A String", # Output only. Describes the state of the subscription. See more details at [the lifecycle of a subscription](/payments/reseller/subscription/reference/index/Receive.Notifications#payments-subscription-lifecycle). "updateTime": "A String", # Output only. System generated timestamp when the subscription is most recently updated. UTC timezone. "upgradeDowngradeDetails": { # Details about the previous subscription that this new subscription upgrades/downgrades from. # Optional. Details about the previous subscription that this new subscription upgrades/downgrades from. Only populated if this subscription is an upgrade/downgrade from another subscription. "billingCycleSpec": "A String", # Required. Specifies the billing cycle spec for the new upgraded/downgraded subscription. diff --git a/docs/dyn/redis_v1.projects.locations.instances.html b/docs/dyn/redis_v1.projects.locations.instances.html index 6ccbd73f4d1..186925bd673 100644 --- a/docs/dyn/redis_v1.projects.locations.instances.html +++ b/docs/dyn/redis_v1.projects.locations.instances.html @@ -128,7 +128,7 @@Method Details
body: object, The request body. The object takes the form of: -{ # A Google Cloud Redis instance. +{ # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true" AUTH is enabled on the instance. Default value is "false" meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -355,7 +355,7 @@Method Details
Returns: An object of the form: - { # A Google Cloud Redis instance. + { # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true" AUTH is enabled on the instance. Default value is "false" meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -499,7 +499,7 @@Method Details
{ # Response for ListInstances. "instances": [ # A list of Redis instances in the project in the specified location, or across all locations. If the `location_id` in the parent field of the request is "-", all regions available to the project are queried, and the results aggregated. If in such an aggregated query a location is unavailable, a placeholder Redis entry is included in the response with the `name` field set to a value of the form `projects/{project_id}/locations/{location_id}/instances/`- and the `status` field set to ERROR and `status_message` field set to "location not available for ListInstances". - { # A Google Cloud Redis instance. + { # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true" AUTH is enabled on the instance. Default value is "false" meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -589,7 +589,7 @@Method Details
body: object, The request body. The object takes the form of: -{ # A Google Cloud Redis instance. +{ # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true" AUTH is enabled on the instance. Default value is "false" meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. diff --git a/docs/dyn/redis_v1beta1.projects.locations.instances.html b/docs/dyn/redis_v1beta1.projects.locations.instances.html index bae475a4beb..4b405ed5e82 100644 --- a/docs/dyn/redis_v1beta1.projects.locations.instances.html +++ b/docs/dyn/redis_v1beta1.projects.locations.instances.html @@ -128,7 +128,7 @@Method Details
body: object, The request body. The object takes the form of: -{ # A Google Cloud Redis instance. +{ # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true" AUTH is enabled on the instance. Default value is "false" meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -355,7 +355,7 @@Method Details
Returns: An object of the form: - { # A Google Cloud Redis instance. + { # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true" AUTH is enabled on the instance. Default value is "false" meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -499,7 +499,7 @@Method Details
{ # Response for ListInstances. "instances": [ # A list of Redis instances in the project in the specified location, or across all locations. If the `location_id` in the parent field of the request is "-", all regions available to the project are queried, and the results aggregated. If in such an aggregated query a location is unavailable, a placeholder Redis entry is included in the response with the `name` field set to a value of the form `projects/{project_id}/locations/{location_id}/instances/`- and the `status` field set to ERROR and `status_message` field set to "location not available for ListInstances". - { # A Google Cloud Redis instance. + { # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true" AUTH is enabled on the instance. Default value is "false" meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. @@ -589,7 +589,7 @@Method Details
body: object, The request body. The object takes the form of: -{ # A Google Cloud Redis instance. +{ # A Google Cloud Redis instance. next id = 30 "alternativeLocationId": "A String", # Optional. Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in location_id. "authEnabled": True or False, # Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set to "true" AUTH is enabled on the instance. Default value is "false" meaning AUTH is disabled. "authorizedNetwork": "A String", # Optional. The full name of the Google Compute Engine [network](https://cloud.google.com/vpc/docs/vpc) to which the instance is connected. If left unspecified, the `default` network will be used. diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html index 8a17f889129..c5cf29e6f49 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html @@ -102,7 +102,7 @@Method Details
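For the Redis surface above, a minimal creation sketch with the discovery client. The project, region, network, zone, tier, and memory size are illustrative values; tier and memorySizeGb come from the full Instance schema rather than this excerpt.

    from googleapiclient import discovery

    redis = discovery.build("redis", "v1")

    instance = {
        "tier": "STANDARD_HA",           # replicated across two zones
        "memorySizeGb": 5,
        "authEnabled": True,             # enable OSS Redis AUTH (default is False)
        "authorizedNetwork": "projects/my-project/global/networks/default",
        "alternativeLocationId": "us-central1-b",
    }
    operation = (
        redis.projects()
        .locations()
        .instances()
        .create(
            parent="projects/my-project/locations/us-central1",
            instanceId="my-redis",
            body=instance,
        )
        .execute()
    )
    print(operation["name"])  # long-running operation; poll until done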
}, "pageSize": 42, # Maximum number of results to return per page. Set this property to the number of prediction results needed. If zero, the service will choose a reasonable default. The maximum allowed value is 100. Values above 100 will be coerced to 100. "pageToken": "A String", # The previous PredictResponse.next_page_token. - "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request level control and adjust prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request level control and adjust prediction results based on product category. + "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request-level control and adjusts prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request-level control and adjusts prediction results based on product category. "a_key": "", }, "userEvent": { # UserEvent captures all metadata information Retail API needs to know about how end users interact with customers' website. # Required. Context about the user, what they are looking at and what action they took to trigger the predict request. Note that this user event detail won't be ingested to userEvent logs. Thus, a separate userEvent write request is required for event logging. @@ -185,8 +185,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html index 45bb8be3e73..7590a185ce1 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.userEvents.html @@ -235,8 +235,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -449,8 +449,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -544,8 +544,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html index d414bc3a8ec..554c03f0c27 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html @@ -102,7 +102,7 @@Method Details
}, "pageSize": 42, # Maximum number of results to return per page. Set this property to the number of prediction results needed. If zero, the service will choose a reasonable default. The maximum allowed value is 100. Values above 100 will be coerced to 100. "pageToken": "A String", # The previous PredictResponse.next_page_token. - "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request level control and adjust prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request level control and adjust prediction results based on product category. + "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request-level control and adjusts prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request-level control and adjusts prediction results based on product category. "a_key": "", }, "userEvent": { # UserEvent captures all metadata information Retail API needs to know about how end users interact with customers' website. # Required. Context about the user, what they are looking at and what action they took to trigger the predict request. Note that this user event detail won't be ingested to userEvent logs. Thus, a separate userEvent write request is required for event logging. @@ -185,8 +185,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html index 50784d94058..902187947ca 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html @@ -235,8 +235,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -449,8 +449,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -544,8 +544,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html index 1b93ab35c89..446f9c17f07 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html @@ -102,7 +102,7 @@Method Details
}, "pageSize": 42, # Maximum number of results to return per page. Set this property to the number of prediction results needed. If zero, the service will choose a reasonable default. The maximum allowed value is 100. Values above 100 will be coerced to 100. "pageToken": "A String", # The previous PredictResponse.next_page_token. - "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request level control and adjust prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request level control and adjust prediction results based on product category. + "params": { # Additional domain specific parameters for the predictions. Allowed values: * `returnProduct`: Boolean. If set to true, the associated product object will be returned in the `results.metadata` field in the prediction response. * `returnScore`: Boolean. If set to true, the prediction 'score' corresponding to each returned product will be set in the `results.metadata` field in the prediction response. The given 'score' indicates the probability of an product being clicked/purchased given the user's context and history. * `strictFiltering`: Boolean. True by default. If set to false, the service will return generic (unfiltered) popular products instead of empty if your filter blocks all prediction results. * `priceRerankLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-price-reranking', 'low-price-reranking', 'medium-price-reranking', 'high-price-reranking'}. This gives request-level control and adjusts prediction results based on product price. * `diversityLevel`: String. Default empty. If set to be non-empty, then it needs to be one of {'no-diversity', 'low-diversity', 'medium-diversity', 'high-diversity', 'auto-diversity'}. This gives request-level control and adjusts prediction results based on product category. "a_key": "", }, "userEvent": { # UserEvent captures all metadata information Retail API needs to know about how end users interact with customers' website. # Required. Context about the user, what they are looking at and what action they took to trigger the predict request. Note that this user event detail won't be ingested to userEvent logs. Thus, a separate userEvent write request is required for event logging. @@ -185,8 +185,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html index 6e7ed84a84c..f6dd7d86b97 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html @@ -235,8 +235,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -449,8 +449,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. @@ -544,8 +544,8 @@Method Details
"uri": "A String", # Complete URL (window.location.href) of the user's current page. When using the client side event reporting with JavaScript pixel and Google Tag Manager, this value is filled in automatically. Maximum length 5,000 characters. "userInfo": { # Information of an end user. # User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. - "ipAddress": "A String", # The end user's IP address. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. - "userAgent": "A String", # User agent as included in the HTTP header. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "ipAddress": "A String", # The end user's IP address. Required for getting SearchRespons.sponsored_results. This field is used to extract location information for personalization. This field must be either an IPv4 address (e.g. "104.133.9.80") or an IPv6 address (e.g. "2001:0db8:85a3:0000:0000:8a2e:0370:7334"). Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. + "userAgent": "A String", # User agent as included in the HTTP header. Required for getting SearchRespons.sponsored_results. The field must be a UTF-8 encoded string with a length limit of 1,000 characters. Otherwise, an INVALID_ARGUMENT error is returned. This should not be set when using the client side event reporting with GTM or JavaScript tag in UserEventService.CollectUserEvent or if direct_user_request is set. "userId": "A String", # Highly recommended for logged-in users. Unique identifier for logged-in user, such as a user name. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. }, "visitorId": "A String", # Required. A unique identifier for tracking visitors. For example, this could be implemented with an HTTP cookie, which should be able to uniquely identify a visitor on a single device. This unique identifier should not change if the visitor log in/out of the website. The field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. The field should not contain PII or user-data. We recommend to use Google Analystics [Client ID](https://developers.google.com/analytics/devguides/collection/analyticsjs/field-reference#clientId) for this field. 
diff --git a/docs/dyn/run_v1alpha1.namespaces.jobs.html b/docs/dyn/run_v1alpha1.namespaces.jobs.html index a8396af9868..dd8a5197038 100644 --- a/docs/dyn/run_v1alpha1.namespaces.jobs.html +++ b/docs/dyn/run_v1alpha1.namespaces.jobs.html @@ -107,25 +107,25 @@Method Details
{ # Job represents the configuration of a single job. A job an immutable resource that references a container image which is run to completion. "apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources +optional "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds +optional - "metadata": { # ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "annotations": { # Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations +optional + "metadata": { # k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional + "annotations": { # (Optional) Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations "a_key": "A String", }, - "clusterName": "A String", # Not currently supported by Cloud Run. The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. +optional - "creationTimestamp": "A String", # CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "deletionGracePeriodSeconds": 42, # Not currently supported by Cloud Run. Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. +optional - "deletionTimestamp": "A String", # DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "finalizers": [ # Not currently supported by Cloud Run. Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +optional +patchStrategy=merge + "clusterName": "A String", # (Optional) Not supported by Cloud Run The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + "creationTimestamp": "A String", # (Optional) CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "deletionGracePeriodSeconds": 42, # (Optional) Not supported by Cloud Run Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. + "deletionTimestamp": "A String", # (Optional) Not supported by Cloud Run DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. 
Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "finalizers": [ # (Optional) Not supported by Cloud Run Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +patchStrategy=merge "A String", ], - "generateName": "A String", # Not currently supported by Cloud Run. GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency +optional string generateName = 2; - "generation": 42, # A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. +optional - "labels": { # Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels +optional + "generateName": "A String", # (Optional) Not supported by Cloud Run GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency string generateName = 2; + "generation": 42, # (Optional) A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. + "labels": { # (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels "a_key": "A String", }, "name": "A String", # Name must be unique within a namespace, within a Cloud Run region. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names +optional "namespace": "A String", # Namespace defines the space within each name must be unique, within a Cloud Run region. In Cloud Run the namespace must be equal to either the project ID or project number. - "ownerReferences": [ # List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. +optional + "ownerReferences": [ # (Optional) Not supported by Cloud Run List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. { # OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field. "apiVersion": "A String", # API version of the referent. "blockOwnerDeletion": True or False, # If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. +optional @@ -135,9 +135,9 @@Method Details
"uid": "A String", # UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, ], - "resourceVersion": "A String", # An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency +optional - "selfLink": "A String", # SelfLink is a URL representing this object. Populated by the system. Read-only. +optional string selfLink = 4; - "uid": "A String", # UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids +optional + "resourceVersion": "A String", # Optional. An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server or omit the value to disable conflict-detection. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients or omitted. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + "selfLink": "A String", # (Optional) SelfLink is a URL representing this object. Populated by the system. Read-only. string selfLink = 4; + "uid": "A String", # (Optional) UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, "spec": { # JobSpec describes how the job execution will look like. # Optional. Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status +optional "activeDeadlineSeconds": "A String", # Optional. Not supported. Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. If set to zero, the system will never attempt to terminate the job based on time. Otherwise, the value must be positive integer. +optional @@ -149,299 +149,199 @@Method Details
"activeDeadlineSeconds": "A String", # Optional. Optional duration in seconds the instance may be active relative to StartTime before the system will actively try to mark it failed and kill associated containers. If set to zero, the system will never attempt to kill an instance based on time. Otherwise, value must be a positive integer. +optional "containers": [ # Optional. List of containers belonging to the instance. We disallow a number of fields on this Container. Only a single container may be provided. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments may be supplied by the system to the container at runtime. - "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "args": [ # (Optional) Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell "A String", ], - "command": [ # Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "command": [ "A String", ], - "env": [ # List of environment variables to set in the container. Cannot be updated. +optional + "env": [ # (Optional) List of environment variables to set in the container. { # EnvVar represents an environment variable present in a Container. "name": "A String", # Name of the environment variable. Must be a C_IDENTIFIER. - "value": "A String", # Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". +optional - "valueFrom": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported EnvVarSource represents a source for the value of an EnvVar. 
# Cloud Run fully managed: supported Source for the environment variable's value. Only supports secret_key_ref. Cloud Run for Anthos: supported Source for the environment variable's value. Cannot be used if value is not empty. +optional - "configMapKeyRef": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key from a ConfigMap. # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key of a ConfigMap. +optional - "key": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The key to select. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "value": "A String", # (Optional) Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + "valueFrom": { # EnvVarSource represents a source for the value of an EnvVar. # (Optional) Source for the environment variable's value. Only supports secret_key_ref. Source for the environment variable's value. Cannot be used if value is not empty. + "configMapKeyRef": { # Not supported by Cloud Run Selects a key from a ConfigMap. # (Optional) Not supported by Cloud Run Selects a key of a ConfigMap. + "key": "A String", # The key to select. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the ConfigMap or its key must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap or its key must be defined }, - "secretKeyRef": { # Cloud Run fully managed: supported Cloud Run on GKE: supported SecretKeySelector selects a key of a Secret. # Cloud Run fully managed: supported. Selects a key (version) of a secret in Secret Manager. Cloud Run for Anthos: supported. Selects a key of a secret in the pod's namespace. +optional - "key": "A String", # Cloud Run fully managed: supported A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. Cloud Run for Anthos: supported The key of the secret to select from. Must be a valid secret key. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
# This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "secretKeyRef": { # SecretKeySelector selects a key of a Secret. # (Optional) Selects a key (version) of a secret in Secret Manager. + "key": "A String", # A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. The key of the secret to select from. Must be a valid secret key. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported The name of the secret in the pod's namespace to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the Secret or its key must be defined +optional + "name": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. The name of the secret in the pod's namespace to select from. + "optional": True or False, # (Optional) Specify whether the Secret or its key must be defined }, }, }, ], - "envFrom": [ # List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. +optional - { # EnvFromSource represents the source of a set of ConfigMaps - "configMapRef": { # ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # The ConfigMap to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "envFrom": [ # (Optional) List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + { # Not supported by Cloud Run EnvFromSource represents the source of a set of ConfigMaps + "configMapRef": { # Not supported by Cloud Run ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # (Optional) The ConfigMap to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the ConfigMap must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap must be defined }, - "prefix": "A String", # An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. +optional - "secretRef": { # SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # The Secret to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "prefix": "A String", # (Optional) An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + "secretRef": { # Not supported by Cloud Run SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # (Optional) The Secret to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The Secret to select from. 
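The env / valueFrom / secretKeyRef fields above are the supported path for surfacing a Secret Manager secret as an environment variable (configMapKeyRef and envFrom are described above as not supported by Cloud Run). A hedged Python sketch of one such entry, respecting the stated constraints ('latest' or an integer version for key; the secret name, or a projects//secrets/ alias, for name); the secret name used here is a placeholder:

    # Hypothetical sketch of a single container env entry backed by Secret Manager.
    # "db-password" is a placeholder secret name assumed to live in the same project.
    env_entry = {
        "name": "DB_PASSWORD",            # must be a C_IDENTIFIER
        "valueFrom": {
            "secretKeyRef": {
                "name": "db-password",    # secret name in Cloud Secret Manager
                "key": "latest",          # 'latest' or an integer version
            },
        },
    }

    container_env = [env_entry]  # goes in the container's "env" list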
- "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the Secret must be defined +optional + "name": "A String", # The Secret to select from. + "optional": True or False, # (Optional) Specify whether the Secret must be defined }, }, ], - "image": "A String", # Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - "imagePullPolicy": "A String", # Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images +optional - "lifecycle": { # Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. # Actions that the management system should take in response to container lifecycle events. Cannot be updated. +optional - "postStart": { # Handler defines a specific action that should be taken # PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "image": "A String", # Only supports containers from Google Container Registry or Artifact Registry URL of the Container image. More info: https://kubernetes.io/docs/concepts/containers/images + "imagePullPolicy": "A String", # (Optional) Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + "livenessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], }, - "preStop": { # Handler defines a specific action that should be taken # PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - }, - "livenessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. 
+optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "name": "A String", # Name of the container specified as a DNS_LABEL. Each container must have a unique name (DNS_LABEL). Cannot be updated. - "ports": [ # List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. +optional + "name": "A String", # (Optional) Name of the container specified as a DNS_LABEL. Currently unused in Cloud Run. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names + "ports": [ # (Optional) List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on. { # ContainerPort represents a network port in a single container. - "containerPort": 42, # Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. - "hostIP": "A String", # What host IP to bind the external port to. +optional - "hostPort": 42, # Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. +optional - "name": "A String", # If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. +optional - "protocol": "A String", # Protocol for port. Must be UDP or TCP. Defaults to "TCP". +optional + "containerPort": 42, # (Optional) Port number the container listens on. This must be a valid port number, 0 < x < 65536. + "name": "A String", # (Optional) If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + "protocol": "A String", # (Optional) Protocol for port. Must be "TCP". Defaults to "TCP". }, ], - "readinessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. 
# Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "readinessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. + }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. 
Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "resources": { # ResourceRequirements describes the compute resource requirements. # Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +optional - "limits": { # Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "resources": { # ResourceRequirements describes the compute resource requirements. # (Optional) Compute Resources required by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + "limits": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', and '4'. Setting 4 CPU requires at least 2Gi of memory. Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "limitsInMap": { # Limits describes the maximum amount of compute resources allowed. This is a temporary field created to migrate away from the map limits field. This is done to become compliant with k8s style API. This field is deprecated in favor of limits field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, - "requests": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "requests": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1' and '2'. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "requestsInMap": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. This is a temporary field created to migrate away from the map requests field. This is done to become compliant with k8s style API. This field is deprecated in favor of requests field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". 
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, }, - "securityContext": { # SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +optional - "allowPrivilegeEscalation": True or False, # AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN +optional - "capabilities": { # Adds and removes POSIX capabilities from running containers. # The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. +optional - "add": [ # Added capabilities +optional + "securityContext": { # Not supported by Cloud Run SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # (Optional) Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + "runAsUser": 42, # (Optional) The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + }, + "startupProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. "A String", ], - "drop": [ # Removed capabilities +optional - "A String", + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value + }, ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "privileged": True or False, # Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. +optional - "readOnlyRootFilesystem": True or False, # Whether this container has a read-only root filesystem. Default is false. +optional - "runAsGroup": 42, # The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsNonRoot": True or False, # Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsUser": 42, # The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "seLinuxOptions": { # SELinuxOptions are the labels to be applied to the container # The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "level": "A String", # Level is SELinux level label that applies to the container. +optional - "role": "A String", # Role is a SELinux role label that applies to the container. +optional - "type": "A String", # Type is a SELinux type label that applies to the container. +optional - "user": "A String", # User is a SELinux user label that applies to the container. +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. 
+ "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "stdin": True or False, # Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. +optional - "stdinOnce": True or False, # Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false +optional - "terminationMessagePath": "A String", # Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. +optional - "terminationMessagePolicy": "A String", # Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. +optional - "tty": True or False, # Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. +optional - "volumeDevices": [ # volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future. +optional - { # volumeDevice describes a mapping of a raw block device within a container. - "devicePath": "A String", # devicePath is the path inside of the container that the device will be mapped to. - "name": "A String", # name must match the name of a persistentVolumeClaim in the pod - }, - ], - "volumeMounts": [ # Pod volumes to mount into the container's filesystem. Cannot be updated. +optional - { # VolumeMount describes a mounting of a Volume within a container. 
+ "terminationMessagePath": "A String", # (Optional) Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. + "terminationMessagePolicy": "A String", # (Optional) Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + "volumeMounts": [ # (Optional) Volume to mount into the container's filesystem. Only supports SecretVolumeSources. Pod volumes to mount into the container's filesystem. + { # Not supported by Cloud Run VolumeMount describes a mounting of a Volume within a container. "mountPath": "A String", # Path within the container at which the volume should be mounted. Must not contain ':'. - "mountPropagation": "A String", # mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is beta in 1.10. +optional "name": "A String", # This must match the Name of a Volume. - "readOnly": True or False, # Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. +optional - "subPath": "A String", # Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). +optional + "readOnly": True or False, # (Optional) Only true is accepted. Defaults to true. + "subPath": "A String", # (Optional) Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). }, ], - "workingDir": "A String", # Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. +optional + "workingDir": "A String", # (Optional) Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. }, ], "restartPolicy": "A String", # Optional. Restart policy for all containers within the instance. Allowed values are: - OnFailure: Instances will always be restarted on failure if the backoffLimit has not been reached. - Never: Instances are never restarted and all failures are permanent. Cannot be used if backoffLimit is set. +optional "serviceAccountName": "A String", # Optional. Email address of the IAM service account associated with the instance of a Job. The service account represents the identity of the running instance, and determines what permissions the instance has. If not provided, the instance will use the project's default service account. +optional "terminationGracePeriodSeconds": "A String", # Optional. Optional duration in seconds the instance needs to terminate gracefully. Value must be non-negative integer. The value zero indicates delete immediately. 
The grace period is the duration in seconds after the processes running in the instance are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. +optional "volumes": [ # Optional. List of volumes that can be mounted by containers belonging to the instance. More info: https://kubernetes.io/docs/concepts/storage/volumes +optional - { # Volume represents a named volume in a container. - "configMap": { # Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + { # Not supported by Cloud Run Volume represents a named volume in a container. + "configMap": { # Not supported by Cloud Run Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "items": [ # (Optional) If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. 
Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], "name": "A String", # Name of the config. - "optional": True or False, # Specify whether the Secret or its keys must be defined. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. }, "name": "A String", # Volume's name. - "secret": { # The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # Cloud Run fully managed: supported If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. Cloud Run for Anthos: supported If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + "secret": { # The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret_name. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0000 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. NOTE: This is an integer representation of the mode bits. So, the integer value should look exactly as the chmod numeric notation, i.e. Unix chmod "777" (a=rwx) should have the integer value 777. + "items": [ # (Optional) If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], - "optional": True or False, # Specify whether the Secret or its keys must be defined. - "secretName": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported Name of the secret in the container's namespace to use. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. + "secretName": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Name of the secret in the container's namespace to use. }, }, ], @@ -492,25 +392,25 @@Method Details
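For readers mapping the secret-volume schema above onto the Python client, the request body fragment is just nested dictionaries. The sketch below is illustrative only: the secret name, volume name, mount path, and file mode are hypothetical placeholders, and the layout simply mirrors the documented secret / items / volumeMounts structure rather than any authoritative example from this change.

# Hedged sketch: all concrete names and values below are placeholders.
secret_volume = {
    "name": "secret-vol",                 # Volume's name, referenced by the container's volumeMounts.
    "secret": {
        "secretName": "my-secret",        # Secret Manager secret; same project assumed by default.
        "defaultMode": 444,               # Integer form of the chmod notation, per the defaultMode description.
        "items": [
            {
                "key": "latest",          # 'latest' or an integer for a specific secret version.
                "path": "my-secret.txt",  # Relative path of the file exposed in the volume.
            },
        ],
    },
}

volume_mount = {
    "name": "secret-vol",                 # Must match the volume name above.
    "mountPath": "/etc/secrets",          # Must not contain ':'.
    "readOnly": True,                     # Only true is accepted, per the readOnly description.
}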
{ # Job represents the configuration of a single job. A job an immutable resource that references a container image which is run to completion. "apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources +optional "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds +optional - "metadata": { # ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "annotations": { # Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations +optional + "metadata": { # k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional + "annotations": { # (Optional) Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations "a_key": "A String", }, - "clusterName": "A String", # Not currently supported by Cloud Run. The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. +optional - "creationTimestamp": "A String", # CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "deletionGracePeriodSeconds": 42, # Not currently supported by Cloud Run. Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. +optional - "deletionTimestamp": "A String", # DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "finalizers": [ # Not currently supported by Cloud Run. Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +optional +patchStrategy=merge + "clusterName": "A String", # (Optional) Not supported by Cloud Run The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + "creationTimestamp": "A String", # (Optional) CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "deletionGracePeriodSeconds": 42, # (Optional) Not supported by Cloud Run Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. + "deletionTimestamp": "A String", # (Optional) Not supported by Cloud Run DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. 
Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "finalizers": [ # (Optional) Not supported by Cloud Run Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +patchStrategy=merge "A String", ], - "generateName": "A String", # Not currently supported by Cloud Run. GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency +optional string generateName = 2; - "generation": 42, # A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. +optional - "labels": { # Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels +optional + "generateName": "A String", # (Optional) Not supported by Cloud Run GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency string generateName = 2; + "generation": 42, # (Optional) A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. + "labels": { # (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels "a_key": "A String", }, "name": "A String", # Name must be unique within a namespace, within a Cloud Run region. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names +optional "namespace": "A String", # Namespace defines the space within each name must be unique, within a Cloud Run region. In Cloud Run the namespace must be equal to either the project ID or project number. - "ownerReferences": [ # List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. +optional + "ownerReferences": [ # (Optional) Not supported by Cloud Run List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. { # OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field. "apiVersion": "A String", # API version of the referent. "blockOwnerDeletion": True or False, # If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. +optional @@ -520,9 +420,9 @@Method Details
"uid": "A String", # UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, ], - "resourceVersion": "A String", # An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency +optional - "selfLink": "A String", # SelfLink is a URL representing this object. Populated by the system. Read-only. +optional string selfLink = 4; - "uid": "A String", # UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids +optional + "resourceVersion": "A String", # Optional. An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server or omit the value to disable conflict-detection. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients or omitted. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + "selfLink": "A String", # (Optional) SelfLink is a URL representing this object. Populated by the system. Read-only. string selfLink = 4; + "uid": "A String", # (Optional) UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, "spec": { # JobSpec describes how the job execution will look like. # Optional. Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status +optional "activeDeadlineSeconds": "A String", # Optional. Not supported. Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. If set to zero, the system will never attempt to terminate the job based on time. Otherwise, the value must be positive integer. +optional @@ -534,299 +434,199 @@Method Details
"activeDeadlineSeconds": "A String", # Optional. Optional duration in seconds the instance may be active relative to StartTime before the system will actively try to mark it failed and kill associated containers. If set to zero, the system will never attempt to kill an instance based on time. Otherwise, value must be a positive integer. +optional "containers": [ # Optional. List of containers belonging to the instance. We disallow a number of fields on this Container. Only a single container may be provided. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments may be supplied by the system to the container at runtime. - "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "args": [ # (Optional) Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell "A String", ], - "command": [ # Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "command": [ "A String", ], - "env": [ # List of environment variables to set in the container. Cannot be updated. +optional + "env": [ # (Optional) List of environment variables to set in the container. { # EnvVar represents an environment variable present in a Container. "name": "A String", # Name of the environment variable. Must be a C_IDENTIFIER. - "value": "A String", # Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". +optional - "valueFrom": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported EnvVarSource represents a source for the value of an EnvVar. 
# Cloud Run fully managed: supported Source for the environment variable's value. Only supports secret_key_ref. Cloud Run for Anthos: supported Source for the environment variable's value. Cannot be used if value is not empty. +optional - "configMapKeyRef": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key from a ConfigMap. # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key of a ConfigMap. +optional - "key": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The key to select. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "value": "A String", # (Optional) Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + "valueFrom": { # EnvVarSource represents a source for the value of an EnvVar. # (Optional) Source for the environment variable's value. Only supports secret_key_ref. Source for the environment variable's value. Cannot be used if value is not empty. + "configMapKeyRef": { # Not supported by Cloud Run Selects a key from a ConfigMap. # (Optional) Not supported by Cloud Run Selects a key of a ConfigMap. + "key": "A String", # The key to select. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the ConfigMap or its key must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap or its key must be defined }, - "secretKeyRef": { # Cloud Run fully managed: supported Cloud Run on GKE: supported SecretKeySelector selects a key of a Secret. # Cloud Run fully managed: supported. Selects a key (version) of a secret in Secret Manager. Cloud Run for Anthos: supported. Selects a key of a secret in the pod's namespace. +optional - "key": "A String", # Cloud Run fully managed: supported A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. Cloud Run for Anthos: supported The key of the secret to select from. Must be a valid secret key. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
# This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "secretKeyRef": { # SecretKeySelector selects a key of a Secret. # (Optional) Selects a key (version) of a secret in Secret Manager. + "key": "A String", # A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. The key of the secret to select from. Must be a valid secret key. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported The name of the secret in the pod's namespace to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the Secret or its key must be defined +optional + "name": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. The name of the secret in the pod's namespace to select from. + "optional": True or False, # (Optional) Specify whether the Secret or its key must be defined }, }, }, ], - "envFrom": [ # List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. +optional - { # EnvFromSource represents the source of a set of ConfigMaps - "configMapRef": { # ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # The ConfigMap to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "envFrom": [ # (Optional) List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + { # Not supported by Cloud Run EnvFromSource represents the source of a set of ConfigMaps + "configMapRef": { # Not supported by Cloud Run ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # (Optional) The ConfigMap to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the ConfigMap must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap must be defined }, - "prefix": "A String", # An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. +optional - "secretRef": { # SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # The Secret to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "prefix": "A String", # (Optional) An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + "secretRef": { # Not supported by Cloud Run SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # (Optional) The Secret to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The Secret to select from. 
- "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the Secret must be defined +optional + "name": "A String", # The Secret to select from. + "optional": True or False, # (Optional) Specify whether the Secret must be defined }, }, ], - "image": "A String", # Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - "imagePullPolicy": "A String", # Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images +optional - "lifecycle": { # Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. # Actions that the management system should take in response to container lifecycle events. Cannot be updated. +optional - "postStart": { # Handler defines a specific action that should be taken # PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "image": "A String", # Only supports containers from Google Container Registry or Artifact Registry URL of the Container image. More info: https://kubernetes.io/docs/concepts/containers/images + "imagePullPolicy": "A String", # (Optional) Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + "livenessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], }, - "preStop": { # Handler defines a specific action that should be taken # PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - }, - "livenessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. 
+optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "name": "A String", # Name of the container specified as a DNS_LABEL. Each container must have a unique name (DNS_LABEL). Cannot be updated. - "ports": [ # List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. +optional + "name": "A String", # (Optional) Name of the container specified as a DNS_LABEL. Currently unused in Cloud Run. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names + "ports": [ # (Optional) List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on. { # ContainerPort represents a network port in a single container. - "containerPort": 42, # Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. - "hostIP": "A String", # What host IP to bind the external port to. +optional - "hostPort": 42, # Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. +optional - "name": "A String", # If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. +optional - "protocol": "A String", # Protocol for port. Must be UDP or TCP. Defaults to "TCP". +optional + "containerPort": 42, # (Optional) Port number the container listens on. This must be a valid port number, 0 < x < 65536. + "name": "A String", # (Optional) If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + "protocol": "A String", # (Optional) Protocol for port. Must be "TCP". Defaults to "TCP". }, ], - "readinessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. 
# Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "readinessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. + }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. 
Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "resources": { # ResourceRequirements describes the compute resource requirements. # Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +optional - "limits": { # Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "resources": { # ResourceRequirements describes the compute resource requirements. # (Optional) Compute Resources required by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + "limits": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', and '4'. Setting 4 CPU requires at least 2Gi of memory. Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "limitsInMap": { # Limits describes the maximum amount of compute resources allowed. This is a temporary field created to migrate away from the map limits field. This is done to become compliant with k8s style API. This field is deprecated in favor of limits field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, - "requests": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "requests": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1' and '2'. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "requestsInMap": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. This is a temporary field created to migrate away from the map requests field. This is done to become compliant with k8s style API. This field is deprecated in favor of requests field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". 
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, }, - "securityContext": { # SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +optional - "allowPrivilegeEscalation": True or False, # AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN +optional - "capabilities": { # Adds and removes POSIX capabilities from running containers. # The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. +optional - "add": [ # Added capabilities +optional + "securityContext": { # Not supported by Cloud Run SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # (Optional) Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + "runAsUser": 42, # (Optional) The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + }, + "startupProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. "A String", ], - "drop": [ # Removed capabilities +optional - "A String", + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value + }, ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "privileged": True or False, # Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. +optional - "readOnlyRootFilesystem": True or False, # Whether this container has a read-only root filesystem. Default is false. +optional - "runAsGroup": 42, # The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsNonRoot": True or False, # Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsUser": 42, # The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "seLinuxOptions": { # SELinuxOptions are the labels to be applied to the container # The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "level": "A String", # Level is SELinux level label that applies to the container. +optional - "role": "A String", # Role is a SELinux role label that applies to the container. +optional - "type": "A String", # Type is a SELinux type label that applies to the container. +optional - "user": "A String", # User is a SELinux user label that applies to the container. +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. 
+ "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "stdin": True or False, # Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. +optional - "stdinOnce": True or False, # Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false +optional - "terminationMessagePath": "A String", # Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. +optional - "terminationMessagePolicy": "A String", # Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. +optional - "tty": True or False, # Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. +optional - "volumeDevices": [ # volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future. +optional - { # volumeDevice describes a mapping of a raw block device within a container. - "devicePath": "A String", # devicePath is the path inside of the container that the device will be mapped to. - "name": "A String", # name must match the name of a persistentVolumeClaim in the pod - }, - ], - "volumeMounts": [ # Pod volumes to mount into the container's filesystem. Cannot be updated. +optional - { # VolumeMount describes a mounting of a Volume within a container. 
+ "terminationMessagePath": "A String", # (Optional) Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. + "terminationMessagePolicy": "A String", # (Optional) Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + "volumeMounts": [ # (Optional) Volume to mount into the container's filesystem. Only supports SecretVolumeSources. Pod volumes to mount into the container's filesystem. + { # Not supported by Cloud Run VolumeMount describes a mounting of a Volume within a container. "mountPath": "A String", # Path within the container at which the volume should be mounted. Must not contain ':'. - "mountPropagation": "A String", # mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is beta in 1.10. +optional "name": "A String", # This must match the Name of a Volume. - "readOnly": True or False, # Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. +optional - "subPath": "A String", # Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). +optional + "readOnly": True or False, # (Optional) Only true is accepted. Defaults to true. + "subPath": "A String", # (Optional) Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). }, ], - "workingDir": "A String", # Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. +optional + "workingDir": "A String", # (Optional) Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. }, ], "restartPolicy": "A String", # Optional. Restart policy for all containers within the instance. Allowed values are: - OnFailure: Instances will always be restarted on failure if the backoffLimit has not been reached. - Never: Instances are never restarted and all failures are permanent. Cannot be used if backoffLimit is set. +optional "serviceAccountName": "A String", # Optional. Email address of the IAM service account associated with the instance of a Job. The service account represents the identity of the running instance, and determines what permissions the instance has. If not provided, the instance will use the project's default service account. +optional "terminationGracePeriodSeconds": "A String", # Optional. Optional duration in seconds the instance needs to terminate gracefully. Value must be non-negative integer. The value zero indicates delete immediately. 
The grace period is the duration in seconds after the processes running in the instance are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. +optional "volumes": [ # Optional. List of volumes that can be mounted by containers belonging to the instance. More info: https://kubernetes.io/docs/concepts/storage/volumes +optional - { # Volume represents a named volume in a container. - "configMap": { # Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + { # Not supported by Cloud Run Volume represents a named volume in a container. + "configMap": { # Not supported by Cloud Run Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "items": [ # (Optional) If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. 
Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], "name": "A String", # Name of the config. - "optional": True or False, # Specify whether the Secret or its keys must be defined. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. }, "name": "A String", # Volume's name. - "secret": { # The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # Cloud Run fully managed: supported If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. Cloud Run for Anthos: supported If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + "secret": { # The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret_name. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0000 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. NOTE: This is an integer representation of the mode bits. So, the integer value should look exactly as the chmod numeric notation, i.e. Unix chmod "777" (a=rwx) should have the integer value 777. + "items": [ # (Optional) If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], - "optional": True or False, # Specify whether the Secret or its keys must be defined. - "secretName": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported Name of the secret in the container's namespace to use. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. + "secretName": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Name of the secret in the container's namespace to use. }, }, ], @@ -905,25 +705,25 @@Method Details
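For orientation, the container and volume fields documented in the hunk above translate into a plain Python dictionary when a request body is assembled with this client. The sketch below is illustrative only: the image path, secret name, volume name, and mount path are placeholders, and where this block nests inside the full Job body should be confirmed against the generated run_v1alpha1 reference pages.

# A minimal sketch of the instance-level container and volume fields described
# above. Every identifier here is a placeholder; only the field layout follows
# the documented schema.
instance_spec = {
    "containers": [
        {
            # Only Container Registry / Artifact Registry images are supported.
            "image": "gcr.io/my-project/worker:latest",
            "args": ["--input", "$(INPUT_BUCKET)"],
            "env": [
                {"name": "INPUT_BUCKET", "value": "gs://my-bucket"},
                {
                    "name": "API_TOKEN",
                    "valueFrom": {
                        "secretKeyRef": {
                            # 'latest' or an integer version of the Secret Manager secret.
                            "key": "latest",
                            "name": "my-secret",
                        }
                    },
                },
            ],
            # Only memory and CPU are supported; CPU limits must be '1', '2', or '4'.
            "resources": {"limits": {"cpu": "1", "memory": "512Mi"}},
            "volumeMounts": [
                # Only SecretVolumeSources are supported, and readOnly only accepts true.
                {"name": "token-volume", "mountPath": "/secrets", "readOnly": True}
            ],
        }
    ],
    "volumes": [
        {
            "name": "token-volume",
            "secret": {
                "secretName": "my-secret",
                "items": [{"key": "latest", "path": "token"}],
            },
        }
    ],
}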
{ # Job represents the configuration of a single job. A job an immutable resource that references a container image which is run to completion. "apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources +optional "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds +optional - "metadata": { # ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "annotations": { # Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations +optional + "metadata": { # k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional + "annotations": { # (Optional) Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations "a_key": "A String", }, - "clusterName": "A String", # Not currently supported by Cloud Run. The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. +optional - "creationTimestamp": "A String", # CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "deletionGracePeriodSeconds": 42, # Not currently supported by Cloud Run. Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. +optional - "deletionTimestamp": "A String", # DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "finalizers": [ # Not currently supported by Cloud Run. Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +optional +patchStrategy=merge + "clusterName": "A String", # (Optional) Not supported by Cloud Run The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + "creationTimestamp": "A String", # (Optional) CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "deletionGracePeriodSeconds": 42, # (Optional) Not supported by Cloud Run Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. + "deletionTimestamp": "A String", # (Optional) Not supported by Cloud Run DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. 
Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "finalizers": [ # (Optional) Not supported by Cloud Run Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +patchStrategy=merge "A String", ], - "generateName": "A String", # Not currently supported by Cloud Run. GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency +optional string generateName = 2; - "generation": 42, # A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. +optional - "labels": { # Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels +optional + "generateName": "A String", # (Optional) Not supported by Cloud Run GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency string generateName = 2; + "generation": 42, # (Optional) A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. + "labels": { # (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels "a_key": "A String", }, "name": "A String", # Name must be unique within a namespace, within a Cloud Run region. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names +optional "namespace": "A String", # Namespace defines the space within each name must be unique, within a Cloud Run region. In Cloud Run the namespace must be equal to either the project ID or project number. - "ownerReferences": [ # List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. +optional + "ownerReferences": [ # (Optional) Not supported by Cloud Run List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. { # OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field. "apiVersion": "A String", # API version of the referent. "blockOwnerDeletion": True or False, # If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. +optional @@ -933,9 +733,9 @@Method Details
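Most of the ObjectMeta fields above are either populated by the system (creationTimestamp, resourceVersion, selfLink, uid) or marked not supported by Cloud Run, so a client normally sets only a few of them when creating a Job. A minimal sketch, with placeholder names:

# Writable metadata only; server-populated and unsupported fields are omitted.
# "my-job" and "my-project" are placeholders.
job_metadata = {
    "name": "my-job",           # must be unique within the namespace and region
    "namespace": "my-project",  # must equal the project ID or project number
    "labels": {"team": "example"},
    "annotations": {"example.com/owner": "example"},
}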
"uid": "A String", # UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, ], - "resourceVersion": "A String", # An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency +optional - "selfLink": "A String", # SelfLink is a URL representing this object. Populated by the system. Read-only. +optional string selfLink = 4; - "uid": "A String", # UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids +optional + "resourceVersion": "A String", # Optional. An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server or omit the value to disable conflict-detection. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients or omitted. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + "selfLink": "A String", # (Optional) SelfLink is a URL representing this object. Populated by the system. Read-only. string selfLink = 4; + "uid": "A String", # (Optional) UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, "spec": { # JobSpec describes how the job execution will look like. # Optional. Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status +optional "activeDeadlineSeconds": "A String", # Optional. Not supported. Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. If set to zero, the system will never attempt to terminate the job based on time. Otherwise, the value must be positive integer. +optional @@ -947,299 +747,199 @@Method Details
"activeDeadlineSeconds": "A String", # Optional. Optional duration in seconds the instance may be active relative to StartTime before the system will actively try to mark it failed and kill associated containers. If set to zero, the system will never attempt to kill an instance based on time. Otherwise, value must be a positive integer. +optional "containers": [ # Optional. List of containers belonging to the instance. We disallow a number of fields on this Container. Only a single container may be provided. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments may be supplied by the system to the container at runtime. - "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "args": [ # (Optional) Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell "A String", ], - "command": [ # Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "command": [ "A String", ], - "env": [ # List of environment variables to set in the container. Cannot be updated. +optional + "env": [ # (Optional) List of environment variables to set in the container. { # EnvVar represents an environment variable present in a Container. "name": "A String", # Name of the environment variable. Must be a C_IDENTIFIER. - "value": "A String", # Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". +optional - "valueFrom": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported EnvVarSource represents a source for the value of an EnvVar. 
# Cloud Run fully managed: supported Source for the environment variable's value. Only supports secret_key_ref. Cloud Run for Anthos: supported Source for the environment variable's value. Cannot be used if value is not empty. +optional - "configMapKeyRef": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key from a ConfigMap. # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key of a ConfigMap. +optional - "key": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The key to select. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "value": "A String", # (Optional) Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + "valueFrom": { # EnvVarSource represents a source for the value of an EnvVar. # (Optional) Source for the environment variable's value. Only supports secret_key_ref. Source for the environment variable's value. Cannot be used if value is not empty. + "configMapKeyRef": { # Not supported by Cloud Run Selects a key from a ConfigMap. # (Optional) Not supported by Cloud Run Selects a key of a ConfigMap. + "key": "A String", # The key to select. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the ConfigMap or its key must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap or its key must be defined }, - "secretKeyRef": { # Cloud Run fully managed: supported Cloud Run on GKE: supported SecretKeySelector selects a key of a Secret. # Cloud Run fully managed: supported. Selects a key (version) of a secret in Secret Manager. Cloud Run for Anthos: supported. Selects a key of a secret in the pod's namespace. +optional - "key": "A String", # Cloud Run fully managed: supported A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. Cloud Run for Anthos: supported The key of the secret to select from. Must be a valid secret key. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
# This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "secretKeyRef": { # SecretKeySelector selects a key of a Secret. # (Optional) Selects a key (version) of a secret in Secret Manager. + "key": "A String", # A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. The key of the secret to select from. Must be a valid secret key. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported The name of the secret in the pod's namespace to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the Secret or its key must be defined +optional + "name": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. The name of the secret in the pod's namespace to select from. + "optional": True or False, # (Optional) Specify whether the Secret or its key must be defined }, }, }, ], - "envFrom": [ # List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. +optional - { # EnvFromSource represents the source of a set of ConfigMaps - "configMapRef": { # ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # The ConfigMap to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "envFrom": [ # (Optional) List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + { # Not supported by Cloud Run EnvFromSource represents the source of a set of ConfigMaps + "configMapRef": { # Not supported by Cloud Run ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # (Optional) The ConfigMap to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the ConfigMap must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap must be defined }, - "prefix": "A String", # An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. +optional - "secretRef": { # SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # The Secret to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "prefix": "A String", # (Optional) An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + "secretRef": { # Not supported by Cloud Run SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # (Optional) The Secret to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The Secret to select from. 
- "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the Secret must be defined +optional + "name": "A String", # The Secret to select from. + "optional": True or False, # (Optional) Specify whether the Secret must be defined }, }, ], - "image": "A String", # Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - "imagePullPolicy": "A String", # Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images +optional - "lifecycle": { # Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. # Actions that the management system should take in response to container lifecycle events. Cannot be updated. +optional - "postStart": { # Handler defines a specific action that should be taken # PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "image": "A String", # Only supports containers from Google Container Registry or Artifact Registry URL of the Container image. More info: https://kubernetes.io/docs/concepts/containers/images + "imagePullPolicy": "A String", # (Optional) Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + "livenessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], }, - "preStop": { # Handler defines a specific action that should be taken # PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - }, - "livenessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. 
+optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "name": "A String", # Name of the container specified as a DNS_LABEL. Each container must have a unique name (DNS_LABEL). Cannot be updated. - "ports": [ # List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. +optional + "name": "A String", # (Optional) Name of the container specified as a DNS_LABEL. Currently unused in Cloud Run. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names + "ports": [ # (Optional) List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on. { # ContainerPort represents a network port in a single container. - "containerPort": 42, # Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. - "hostIP": "A String", # What host IP to bind the external port to. +optional - "hostPort": 42, # Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. +optional - "name": "A String", # If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. +optional - "protocol": "A String", # Protocol for port. Must be UDP or TCP. Defaults to "TCP". +optional + "containerPort": 42, # (Optional) Port number the container listens on. This must be a valid port number, 0 < x < 65536. + "name": "A String", # (Optional) If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + "protocol": "A String", # (Optional) Protocol for port. Must be "TCP". Defaults to "TCP". }, ], - "readinessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. 
# Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "readinessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. + }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "resources": { # ResourceRequirements describes the compute resource requirements. # Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +optional - "limits": { # Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "resources": { # ResourceRequirements describes the compute resource requirements. # (Optional) Compute Resources required by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + "limits": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', and '4'. Setting 4 CPU requires at least 2Gi of memory. Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "limitsInMap": { # Limits describes the maximum amount of compute resources allowed. This is a temporary field created to migrate away from the map limits field. This is done to become compliant with k8s style API. This field is deprecated in favor of limits field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, - "requests": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "requests": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1' and '2'. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "requestsInMap": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. This is a temporary field created to migrate away from the map requests field. This is done to become compliant with k8s style API. This field is deprecated in favor of requests field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". 
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, }, - "securityContext": { # SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +optional - "allowPrivilegeEscalation": True or False, # AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN +optional - "capabilities": { # Adds and removes POSIX capabilities from running containers. # The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. +optional - "add": [ # Added capabilities +optional + "securityContext": { # Not supported by Cloud Run SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # (Optional) Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + "runAsUser": 42, # (Optional) The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + }, + "startupProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. "A String", ], - "drop": [ # Removed capabilities +optional - "A String", + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value + }, ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "privileged": True or False, # Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. +optional - "readOnlyRootFilesystem": True or False, # Whether this container has a read-only root filesystem. Default is false. +optional - "runAsGroup": 42, # The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsNonRoot": True or False, # Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsUser": 42, # The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "seLinuxOptions": { # SELinuxOptions are the labels to be applied to the container # The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "level": "A String", # Level is SELinux level label that applies to the container. +optional - "role": "A String", # Role is a SELinux role label that applies to the container. +optional - "type": "A String", # Type is a SELinux type label that applies to the container. +optional - "user": "A String", # User is a SELinux user label that applies to the container. +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. 
+ "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "stdin": True or False, # Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. +optional - "stdinOnce": True or False, # Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false +optional - "terminationMessagePath": "A String", # Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. +optional - "terminationMessagePolicy": "A String", # Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. +optional - "tty": True or False, # Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. +optional - "volumeDevices": [ # volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future. +optional - { # volumeDevice describes a mapping of a raw block device within a container. - "devicePath": "A String", # devicePath is the path inside of the container that the device will be mapped to. - "name": "A String", # name must match the name of a persistentVolumeClaim in the pod - }, - ], - "volumeMounts": [ # Pod volumes to mount into the container's filesystem. Cannot be updated. +optional - { # VolumeMount describes a mounting of a Volume within a container. 
+ "terminationMessagePath": "A String", # (Optional) Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. + "terminationMessagePolicy": "A String", # (Optional) Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + "volumeMounts": [ # (Optional) Volume to mount into the container's filesystem. Only supports SecretVolumeSources. Pod volumes to mount into the container's filesystem. + { # Not supported by Cloud Run VolumeMount describes a mounting of a Volume within a container. "mountPath": "A String", # Path within the container at which the volume should be mounted. Must not contain ':'. - "mountPropagation": "A String", # mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is beta in 1.10. +optional "name": "A String", # This must match the Name of a Volume. - "readOnly": True or False, # Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. +optional - "subPath": "A String", # Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). +optional + "readOnly": True or False, # (Optional) Only true is accepted. Defaults to true. + "subPath": "A String", # (Optional) Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). }, ], - "workingDir": "A String", # Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. +optional + "workingDir": "A String", # (Optional) Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. }, ], "restartPolicy": "A String", # Optional. Restart policy for all containers within the instance. Allowed values are: - OnFailure: Instances will always be restarted on failure if the backoffLimit has not been reached. - Never: Instances are never restarted and all failures are permanent. Cannot be used if backoffLimit is set. +optional "serviceAccountName": "A String", # Optional. Email address of the IAM service account associated with the instance of a Job. The service account represents the identity of the running instance, and determines what permissions the instance has. If not provided, the instance will use the project's default service account. +optional "terminationGracePeriodSeconds": "A String", # Optional. Optional duration in seconds the instance needs to terminate gracefully. Value must be non-negative integer. The value zero indicates delete immediately. 
The grace period is the duration in seconds after the processes running in the instance are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. +optional "volumes": [ # Optional. List of volumes that can be mounted by containers belonging to the instance. More info: https://kubernetes.io/docs/concepts/storage/volumes +optional - { # Volume represents a named volume in a container. - "configMap": { # Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + { # Not supported by Cloud Run Volume represents a named volume in a container. + "configMap": { # Not supported by Cloud Run Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "items": [ # (Optional) If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. 
Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], "name": "A String", # Name of the config. - "optional": True or False, # Specify whether the Secret or its keys must be defined. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. }, "name": "A String", # Volume's name. - "secret": { # The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # Cloud Run fully managed: supported If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. Cloud Run for Anthos: supported If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + "secret": { # The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret_name. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0000 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. NOTE: This is an integer representation of the mode bits. So, the integer value should look exactly as the chmod numeric notation, i.e. Unix chmod "777" (a=rwx) should have the integer value 777. + "items": [ # (Optional) If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], - "optional": True or False, # Specify whether the Secret or its keys must be defined. - "secretName": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported Name of the secret in the container's namespace to use. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. + "secretName": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Name of the secret in the container's namespace to use. }, }, ], @@ -1307,25 +1007,25 @@Method Details
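A minimal sketch, for orientation, of how the container and volume fields documented in the hunk above might be populated from Python with google-api-python-client. The project, job, secret, and service-account names are hypothetical placeholders; the create call at the end assumes the namespaces/{project} parent form used elsewhere in this API surface, and real calls may additionally require a regional endpoint.

    from googleapiclient.discovery import build

    project = "my-project"  # hypothetical project ID
    # Cloud Run Admin API client; a regional endpoint may be needed for real calls.
    client = build("run", "v1alpha1")

    job_body = {
        "apiVersion": "run.googleapis.com/v1alpha1",
        "kind": "Job",
        "metadata": {"name": "hello-job", "namespace": project},
        "spec": {
            "template": {
                "spec": {
                    "containers": [
                        {
                            # Only Container Registry / Artifact Registry image URLs are supported.
                            "image": f"gcr.io/{project}/hello-job:latest",
                            "env": [
                                {
                                    "name": "API_TOKEN",
                                    "valueFrom": {
                                        # secretKeyRef selects a Secret Manager secret version:
                                        # 'latest' or an integer version number.
                                        "secretKeyRef": {"name": "api-token", "key": "latest"},
                                    },
                                },
                            ],
                            # Only memory and CPU are supported; CPU limits must be '1', '2' or '4'.
                            "resources": {"limits": {"cpu": "1", "memory": "512Mi"}},
                            # Volume mounts only support secret-backed volumes.
                            "volumeMounts": [
                                {"name": "service-config", "mountPath": "/etc/config", "readOnly": True},
                            ],
                        },
                    ],
                    "serviceAccountName": f"job-runner@{project}.iam.gserviceaccount.com",
                    "volumes": [
                        {
                            "name": "service-config",
                            "secret": {
                                "secretName": "service-config",
                                # Expose version 'latest' of the secret as /etc/config/config.yaml.
                                "items": [{"key": "latest", "path": "config.yaml"}],
                            },
                        },
                    ],
                },
            },
        },
    }

    # The body would then be passed to the jobs create method documented in this file, e.g.:
    # client.namespaces().jobs().create(parent=f"namespaces/{project}", body=job_body).execute()
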
{ # Job represents the configuration of a single job. A job an immutable resource that references a container image which is run to completion. "apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources +optional "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds +optional - "metadata": { # ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "annotations": { # Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations +optional + "metadata": { # k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional + "annotations": { # (Optional) Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations "a_key": "A String", }, - "clusterName": "A String", # Not currently supported by Cloud Run. The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. +optional - "creationTimestamp": "A String", # CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "deletionGracePeriodSeconds": 42, # Not currently supported by Cloud Run. Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. +optional - "deletionTimestamp": "A String", # DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +optional - "finalizers": [ # Not currently supported by Cloud Run. Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +optional +patchStrategy=merge + "clusterName": "A String", # (Optional) Not supported by Cloud Run The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + "creationTimestamp": "A String", # (Optional) CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "deletionGracePeriodSeconds": 42, # (Optional) Not supported by Cloud Run Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. + "deletionTimestamp": "A String", # (Optional) Not supported by Cloud Run DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. 
Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + "finalizers": [ # (Optional) Not supported by Cloud Run Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. +patchStrategy=merge "A String", ], - "generateName": "A String", # Not currently supported by Cloud Run. GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency +optional string generateName = 2; - "generation": 42, # A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. +optional - "labels": { # Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels +optional + "generateName": "A String", # (Optional) Not supported by Cloud Run GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency string generateName = 2; + "generation": 42, # (Optional) A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. + "labels": { # (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and routes. More info: http://kubernetes.io/docs/user-guide/labels "a_key": "A String", }, "name": "A String", # Name must be unique within a namespace, within a Cloud Run region. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names +optional "namespace": "A String", # Namespace defines the space within each name must be unique, within a Cloud Run region. In Cloud Run the namespace must be equal to either the project ID or project number. - "ownerReferences": [ # List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. +optional + "ownerReferences": [ # (Optional) Not supported by Cloud Run List of objects that own this object. If ALL objects in the list have been deleted, this object will be garbage collected. { # OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field. "apiVersion": "A String", # API version of the referent. "blockOwnerDeletion": True or False, # If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. +optional @@ -1335,9 +1035,9 @@Method Details
"uid": "A String", # UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, ], - "resourceVersion": "A String", # An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency +optional - "selfLink": "A String", # SelfLink is a URL representing this object. Populated by the system. Read-only. +optional string selfLink = 4; - "uid": "A String", # UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids +optional + "resourceVersion": "A String", # Optional. An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server or omit the value to disable conflict-detection. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients or omitted. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + "selfLink": "A String", # (Optional) SelfLink is a URL representing this object. Populated by the system. Read-only. string selfLink = 4; + "uid": "A String", # (Optional) UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids }, "spec": { # JobSpec describes how the job execution will look like. # Optional. Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status +optional "activeDeadlineSeconds": "A String", # Optional. Not supported. Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. If set to zero, the system will never attempt to terminate the job based on time. Otherwise, the value must be positive integer. +optional @@ -1349,299 +1049,199 @@Method Details
"activeDeadlineSeconds": "A String", # Optional. Optional duration in seconds the instance may be active relative to StartTime before the system will actively try to mark it failed and kill associated containers. If set to zero, the system will never attempt to kill an instance based on time. Otherwise, value must be a positive integer. +optional "containers": [ # Optional. List of containers belonging to the instance. We disallow a number of fields on this Container. Only a single container may be provided. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments may be supplied by the system to the container at runtime. - "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "args": [ # (Optional) Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell "A String", ], - "command": [ # Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell +optional + "command": [ "A String", ], - "env": [ # List of environment variables to set in the container. Cannot be updated. +optional + "env": [ # (Optional) List of environment variables to set in the container. { # EnvVar represents an environment variable present in a Container. "name": "A String", # Name of the environment variable. Must be a C_IDENTIFIER. - "value": "A String", # Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". +optional - "valueFrom": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported EnvVarSource represents a source for the value of an EnvVar. 
# Cloud Run fully managed: supported Source for the environment variable's value. Only supports secret_key_ref. Cloud Run for Anthos: supported Source for the environment variable's value. Cannot be used if value is not empty. +optional - "configMapKeyRef": { # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key from a ConfigMap. # Cloud Run fully managed: not supported Cloud Run on GKE: supported Selects a key of a ConfigMap. +optional - "key": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The key to select. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "value": "A String", # (Optional) Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + "valueFrom": { # EnvVarSource represents a source for the value of an EnvVar. # (Optional) Source for the environment variable's value. Only supports secret_key_ref. Source for the environment variable's value. Cannot be used if value is not empty. + "configMapKeyRef": { # Not supported by Cloud Run Selects a key from a ConfigMap. # (Optional) Not supported by Cloud Run Selects a key of a ConfigMap. + "key": "A String", # The key to select. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run on GKE: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the ConfigMap or its key must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap or its key must be defined }, - "secretKeyRef": { # Cloud Run fully managed: supported Cloud Run on GKE: supported SecretKeySelector selects a key of a Secret. # Cloud Run fully managed: supported. Selects a key (version) of a secret in Secret Manager. Cloud Run for Anthos: supported. Selects a key of a secret in the pod's namespace. +optional - "key": "A String", # Cloud Run fully managed: supported A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. Cloud Run for Anthos: supported The key of the secret to select from. Must be a valid secret key. - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
# This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "secretKeyRef": { # SecretKeySelector selects a key of a Secret. # (Optional) Selects a key (version) of a secret in Secret Manager. + "key": "A String", # A Cloud Secret Manager secret version. Must be 'latest' for the latest version or an integer for a specific version. The key of the secret to select from. Must be a valid secret key. + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported The name of the secret in the pod's namespace to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run on GKE: supported Specify whether the Secret or its key must be defined +optional + "name": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. The name of the secret in the pod's namespace to select from. + "optional": True or False, # (Optional) Specify whether the Secret or its key must be defined }, }, }, ], - "envFrom": [ # List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. +optional - { # EnvFromSource represents the source of a set of ConfigMaps - "configMapRef": { # ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # The ConfigMap to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "envFrom": [ # (Optional) List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + { # Not supported by Cloud Run EnvFromSource represents the source of a set of ConfigMaps + "configMapRef": { # Not supported by Cloud Run ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. # (Optional) The ConfigMap to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The ConfigMap to select from. - "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the ConfigMap must be defined +optional + "name": "A String", # The ConfigMap to select from. + "optional": True or False, # (Optional) Specify whether the ConfigMap must be defined }, - "prefix": "A String", # An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. +optional - "secretRef": { # SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # The Secret to select from +optional - "localObjectReference": { # LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. - "name": "A String", # Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + "prefix": "A String", # (Optional) An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + "secretRef": { # Not supported by Cloud Run SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables. # (Optional) The Secret to select from + "localObjectReference": { # Not supported by Cloud Run LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. # This field should not be used directly as it is meant to be inlined directly into the message. Use the "name" field instead. + "name": "A String", # (Optional) Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names }, - "name": "A String", # Cloud Run fully managed: not supported Cloud Run for Anthos: supported The Secret to select from. 
- "optional": True or False, # Cloud Run fully managed: not supported Cloud Run for Anthos: supported Specify whether the Secret must be defined +optional + "name": "A String", # The Secret to select from. + "optional": True or False, # (Optional) Specify whether the Secret must be defined }, }, ], - "image": "A String", # Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - "imagePullPolicy": "A String", # Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images +optional - "lifecycle": { # Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. # Actions that the management system should take in response to container lifecycle events. Cannot be updated. +optional - "postStart": { # Handler defines a specific action that should be taken # PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "image": "A String", # Only supports containers from Google Container Registry or Artifact Registry URL of the Container image. More info: https://kubernetes.io/docs/concepts/containers/images + "imagePullPolicy": "A String", # (Optional) Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + "livenessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], }, - "preStop": { # Handler defines a specific action that should be taken # PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +optional - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - }, - "livenessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. 
+optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - }, + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "name": "A String", # Name of the container specified as a DNS_LABEL. Each container must have a unique name (DNS_LABEL). Cannot be updated. - "ports": [ # List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. +optional + "name": "A String", # (Optional) Name of the container specified as a DNS_LABEL. Currently unused in Cloud Run. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names + "ports": [ # (Optional) List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on. { # ContainerPort represents a network port in a single container. - "containerPort": 42, # Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. - "hostIP": "A String", # What host IP to bind the external port to. +optional - "hostPort": 42, # Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. +optional - "name": "A String", # If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. +optional - "protocol": "A String", # Protocol for port. Must be UDP or TCP. Defaults to "TCP". +optional + "containerPort": 42, # (Optional) Port number the container listens on. This must be a valid port number, 0 < x < 65536. + "name": "A String", # (Optional) If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + "protocol": "A String", # (Optional) Protocol for port. Must be "TCP". Defaults to "TCP". }, ], - "readinessProbe": { # Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. 
# Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "failureThreshold": 42, # Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. +optional - "handler": { # Handler defines a specific action that should be taken # The action taken to determine the health of a container - "exec": { # ExecAction describes a "run in container" action. # One and only one of the following should be specified. Exec specifies the action to take. +optional - "command": [ # Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. +optional - "A String", - ], - }, - "httpGet": { # HTTPGetAction describes an action based on HTTP Get requests. # HTTPGet specifies the http request to perform. +optional - "host": "A String", # Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. +optional - "httpHeaders": [ # Custom headers to set in the request. HTTP allows repeated headers. +optional - { # HTTPHeader describes a custom header to be used in HTTP probes - "name": "A String", # The header field name - "value": "A String", # The header field value - }, - ], - "path": "A String", # Path to access on the HTTP server. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. - }, - "scheme": "A String", # Scheme to use for connecting to the host. Defaults to HTTP. +optional - }, - "tcpSocket": { # TCPSocketAction describes an action based on opening a socket # TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - "host": "A String", # Optional: Host name to connect to, defaults to the pod IP. +optional - "port": { # IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number. # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - "intVal": 42, # The int value. - "strVal": "A String", # The string value. - "type": 42, # The type of the value. + "readinessProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + "A String", + ], + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value }, - }, + ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "initialDelaySeconds": 42, # Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional - "periodSeconds": 42, # How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +optional - "successThreshold": 42, # Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. +optional - "timeoutSeconds": 42, # Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. + }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "resources": { # ResourceRequirements describes the compute resource requirements. # Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +optional - "limits": { # Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "resources": { # ResourceRequirements describes the compute resource requirements. # (Optional) Compute Resources required by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + "limits": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', and '4'. Setting 4 CPU requires at least 2Gi of memory. Limits describes the maximum amount of compute resources allowed. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "limitsInMap": { # Limits describes the maximum amount of compute resources allowed. This is a temporary field created to migrate away from the map limits field. This is done to become compliant with k8s style API. This field is deprecated in favor of limits field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, - "requests": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + "requests": { # (Optional) Only memory and CPU are supported. Note: The only supported values for CPU are '1' and '2'. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go "a_key": "A String", }, - "requestsInMap": { # Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. This is a temporary field created to migrate away from the map requests field. This is done to become compliant with k8s style API. This field is deprecated in favor of requests field. - "a_key": { # The view model of a single quantity, e.g. "800 MiB". 
Corresponds to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto - "string": "A String", # Stringified version of the quantity, e.g., "800 MiB". - }, - }, }, - "securityContext": { # SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +optional - "allowPrivilegeEscalation": True or False, # AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN +optional - "capabilities": { # Adds and removes POSIX capabilities from running containers. # The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. +optional - "add": [ # Added capabilities +optional + "securityContext": { # Not supported by Cloud Run SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. # (Optional) Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + "runAsUser": 42, # (Optional) The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + }, + "startupProbe": { # Not supported by Cloud Run Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. # (Optional) Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "exec": { # Not supported by Cloud Run ExecAction describes a "run in container" action. # (Optional) One and only one of the following should be specified. Exec specifies the action to take. A field inlined from the Handler message. + "command": [ # (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. "A String", ], - "drop": [ # Removed capabilities +optional - "A String", + }, + "failureThreshold": 42, # (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ "httpGet": { # Not supported by Cloud Run HTTPGetAction describes an action based on HTTP Get requests. # (Optional) HTTPGet specifies the http request to perform. A field inlined from the Handler message. + "host": "A String", # (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + "httpHeaders": [ # (Optional) Custom headers to set in the request. HTTP allows repeated headers. + { # Not supported by Cloud Run HTTPHeader describes a custom header to be used in HTTP probes + "name": "A String", # The header field name + "value": "A String", # The header field value + }, ], + "path": "A String", # (Optional) Path to access on the HTTP server. + "scheme": "A String", # (Optional) Scheme to use for connecting to the host. Defaults to HTTP. }, - "privileged": True or False, # Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. +optional - "readOnlyRootFilesystem": True or False, # Whether this container has a read-only root filesystem. Default is false. +optional - "runAsGroup": 42, # The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsNonRoot": True or False, # Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "runAsUser": 42, # The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "seLinuxOptions": { # SELinuxOptions are the labels to be applied to the container # The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +optional - "level": "A String", # Level is SELinux level label that applies to the container. +optional - "role": "A String", # Role is a SELinux role label that applies to the container. +optional - "type": "A String", # Type is a SELinux type label that applies to the container. +optional - "user": "A String", # User is a SELinux user label that applies to the container. +optional + "initialDelaySeconds": 42, # (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + "periodSeconds": 42, # (Optional) How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + "successThreshold": 42, # (Optional) Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. 
+ "tcpSocket": { # Not supported by Cloud Run TCPSocketAction describes an action based on opening a socket # (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported A field inlined from the Handler message. + "host": "A String", # (Optional) Optional: Host name to connect to, defaults to the pod IP. + "port": 42, # Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. This field is currently limited to integer types only because of proto's inability to properly support the IntOrString golang type. }, + "timeoutSeconds": 42, # (Optional) Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes }, - "stdin": True or False, # Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. +optional - "stdinOnce": True or False, # Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false +optional - "terminationMessagePath": "A String", # Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. +optional - "terminationMessagePolicy": "A String", # Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. +optional - "tty": True or False, # Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. +optional - "volumeDevices": [ # volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future. +optional - { # volumeDevice describes a mapping of a raw block device within a container. - "devicePath": "A String", # devicePath is the path inside of the container that the device will be mapped to. - "name": "A String", # name must match the name of a persistentVolumeClaim in the pod - }, - ], - "volumeMounts": [ # Pod volumes to mount into the container's filesystem. Cannot be updated. +optional - { # VolumeMount describes a mounting of a Volume within a container. 
+ "terminationMessagePath": "A String", # (Optional) Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. + "terminationMessagePolicy": "A String", # (Optional) Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + "volumeMounts": [ # (Optional) Volume to mount into the container's filesystem. Only supports SecretVolumeSources. Pod volumes to mount into the container's filesystem. + { # Not supported by Cloud Run VolumeMount describes a mounting of a Volume within a container. "mountPath": "A String", # Path within the container at which the volume should be mounted. Must not contain ':'. - "mountPropagation": "A String", # mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is beta in 1.10. +optional "name": "A String", # This must match the Name of a Volume. - "readOnly": True or False, # Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. +optional - "subPath": "A String", # Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). +optional + "readOnly": True or False, # (Optional) Only true is accepted. Defaults to true. + "subPath": "A String", # (Optional) Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). }, ], - "workingDir": "A String", # Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. +optional + "workingDir": "A String", # (Optional) Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. }, ], "restartPolicy": "A String", # Optional. Restart policy for all containers within the instance. Allowed values are: - OnFailure: Instances will always be restarted on failure if the backoffLimit has not been reached. - Never: Instances are never restarted and all failures are permanent. Cannot be used if backoffLimit is set. +optional "serviceAccountName": "A String", # Optional. Email address of the IAM service account associated with the instance of a Job. The service account represents the identity of the running instance, and determines what permissions the instance has. If not provided, the instance will use the project's default service account. +optional "terminationGracePeriodSeconds": "A String", # Optional. Optional duration in seconds the instance needs to terminate gracefully. Value must be non-negative integer. The value zero indicates delete immediately. 
The grace period is the duration in seconds after the processes running in the instance are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. +optional "volumes": [ # Optional. List of volumes that can be mounted by containers belonging to the instance. More info: https://kubernetes.io/docs/concepts/storage/volumes +optional - { # Volume represents a named volume in a container. - "configMap": { # Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + { # Not supported by Cloud Run Volume represents a named volume in a container. + "configMap": { # Not supported by Cloud Run Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "items": [ # (Optional) If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. 
Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], "name": "A String", # Name of the config. - "optional": True or False, # Specify whether the Secret or its keys must be defined. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. }, "name": "A String", # Volume's name. - "secret": { # The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. - "defaultMode": 42, # Mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - "items": [ # Cloud Run fully managed: supported If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. Cloud Run for Anthos: supported If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. + "secret": { # The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret_name. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. + "defaultMode": 42, # (Optional) Mode bits to use on created files by default. Must be a value between 0000 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. NOTE: This is an integer representation of the mode bits. So, the integer value should look exactly as the chmod numeric notation, i.e. Unix chmod "777" (a=rwx) should have the integer value 777. + "items": [ # (Optional) If unspecified, the volume will expose a file whose name is the secret_name. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a key and a path. If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified that is not present in the Secret, the volume setup will error unless it is marked optional. { # Maps a string key to a path within a volume. - "key": "A String", # Cloud Run fully managed: supported The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. Cloud Run for Anthos: supported The key to project. - "mode": 42, # Mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. +optional - "path": "A String", # Cloud Run fully managed: supported Cloud Run for Anthos: supported The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + "key": "A String", # The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. The key to project. + "mode": 42, # (Optional) Mode bits to use on this file, must be a value between 0000 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + "path": "A String", # The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. }, ], - "optional": True or False, # Specify whether the Secret or its keys must be defined. - "secretName": "A String", # Cloud Run fully managed: supported The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Cloud Run for Anthos: supported Name of the secret in the container's namespace to use. + "optional": True or False, # (Optional) Specify whether the Secret or its keys must be defined. + "secretName": "A String", # The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. If the secret is in another project, you must define an alias. An alias definition has the form: :projects//secrets/. If multiple alias definitions are needed, they must be separated by commas. The alias definitions must be set on the run.googleapis.com/secrets annotation. Name of the secret in the container's namespace to use. }, }, ], diff --git a/docs/dyn/slides_v1.presentations.html b/docs/dyn/slides_v1.presentations.html index d9b1e8fd411..b30cbbfdce8 100644 --- a/docs/dyn/slides_v1.presentations.html +++ b/docs/dyn/slides_v1.presentations.html @@ -2031,6 +2031,7 @@Method Details
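The Cloud Run job spec rewritten in the hunks above documents how secret volumes and volume mounts fit together: a `secret` volume exposes Secret Manager versions as files, and a container references that volume by name in `volumeMounts`. Below is a minimal sketch of building such a request body with this library; the `namespaces().jobs().create` call path, project, image, and secret names are illustrative assumptions, not values taken from this diff.

```python
from googleapiclient.discovery import build

# Assumed surface: run v1alpha1 jobs (illustrative; check the generated docs for the exact path).
# Assumes application default credentials are available.
run = build("run", "v1alpha1")

job_body = {
    "apiVersion": "run.googleapis.com/v1alpha1",
    "kind": "Job",
    "metadata": {"name": "example-job"},          # hypothetical job name
    "spec": {
        "template": {
            "spec": {
                "containers": [{
                    "image": "gcr.io/example-project/worker",   # hypothetical image
                    "volumeMounts": [{
                        "name": "app-secrets",    # must match a volume name below
                        "mountPath": "/secrets",  # must not contain ':'
                        "readOnly": True,         # only True is accepted
                    }],
                    "workingDir": "/app",
                }],
                "volumes": [{
                    "name": "app-secrets",
                    "secret": {
                        "secretName": "example-secret",  # hypothetical Secret Manager secret
                        # key may be 'latest' or a specific version number
                        "items": [{"key": "latest", "path": "config.json"}],
                        # integer form of the chmod notation, per the defaultMode docs above
                        "defaultMode": 444,
                    },
                }],
                "restartPolicy": "OnFailure",
                "serviceAccountName": "job-runner@example-project.iam.gserviceaccount.com",
                "terminationGracePeriodSeconds": "10",
            },
        },
    },
}

request = run.namespaces().jobs().create(parent="namespaces/example-project", body=job_body)
response = request.execute()
```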
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -3041,6 +3042,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -4049,6 +4051,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -5069,6 +5072,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -6090,6 +6094,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -7100,6 +7105,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -8108,6 +8114,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -9128,6 +9135,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -10156,6 +10164,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -11166,6 +11175,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -12174,6 +12184,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. @@ -13194,6 +13205,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. diff --git a/docs/dyn/slides_v1.presentations.pages.html b/docs/dyn/slides_v1.presentations.pages.html index e594c79e1f7..89a500b6e26 100644 --- a/docs/dyn/slides_v1.presentations.pages.html +++ b/docs/dyn/slides_v1.presentations.pages.html @@ -1106,6 +1106,7 @@Method Details
"pageType": "A String", # The type of the page. "revisionId": "A String", # The revision ID of the presentation containing this page. Can be used in update requests to assert that the presentation revision hasn't changed since the last read operation. Only populated if the user has edit access to the presentation. The format of the revision ID may change over time, so it should be treated opaquely. A returned revision ID is only guaranteed to be valid for 24 hours after it has been returned and cannot be shared across users. If the revision ID is unchanged between calls, then the presentation has not changed. Conversely, a changed ID (for the same presentation and user) usually means the presentation has been updated; however, a changed ID can also be due to internal factors such as ID format changes. "slideProperties": { # The properties of Page that are only relevant for pages with page_type SLIDE. # Slide specific properties. Only set if page_type = SLIDE. + "isSkipped": True or False, # Whether the slide is skipped in the presentation mode. Defaults to false. "layoutObjectId": "A String", # The object ID of the layout that this slide is based on. This property is read-only. "masterObjectId": "A String", # The object ID of the master that this slide is based on. This property is read-only. "notesPage": # Object with schema name: Page # The notes page that this slide is associated with. It defines the visual appearance of a notes page when printing or exporting slides with speaker notes. A notes page inherits properties from the notes master. The placeholder shape with type BODY on the notes page contains the speaker notes for this slide. The ID of this shape is identified by the speakerNotesObjectId field. The notes page is read-only except for the text content and styles of the speaker notes shape. This property is read-only. diff --git a/docs/dyn/spanner_v1.projects.instances.databases.html b/docs/dyn/spanner_v1.projects.instances.databases.html index aa4b0dc431d..a3a1780ca3c 100644 --- a/docs/dyn/spanner_v1.projects.instances.databases.html +++ b/docs/dyn/spanner_v1.projects.instances.databases.html @@ -455,6 +455,7 @@Method Details
"token": "A String", # The token identifying the message, e.g. 'METRIC_READ_CPU'. This should be unique within the service. }, "startKeyIndex": 42, # The index of the start key in indexed_keys. + "timeOffset": "A String", # The time offset. This is the time since the start of the time interval. "unit": { # A message representing a user-facing string whose value may need to be translated before being displayed. # The unit of the metric. This is an unstructured field and will be mapped as is to the user. "args": { # A map of arguments used when creating the localized message. Keys represent parameter names which may be used by the localized version when substituting dynamic values. "a_key": "A String", diff --git a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html index 1cbdde8390f..a3e75975498 100644 --- a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html +++ b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html @@ -179,7 +179,7 @@Method Details
The object takes the form of: { # The request for BeginTransaction. - "options": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. 
The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. 
The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. 
Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Required. Options for the new transaction. + "options": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. 
Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). 
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. 
This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Required. Options for the new transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. 
}, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource.
@@ -310,7 +310,7 @@ Method Details
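The options described above are the ones passed to BeginTransaction, including the Partitioned DML mode. As a minimal, hedged sketch (not part of the generated docs) using the discovery-based Python client these pages document: the database path, the `Albums` table and the `marketing_budget` column are placeholders, and the statement is deliberately idempotent because Partitioned DML may apply it more than once per partition.

    from googleapiclient.discovery import build

    service = build("spanner", "v1")
    sessions = service.projects().instances().databases().sessions()

    # Placeholder database path; substitute a real project/instance/database.
    database = "projects/my-project/instances/my-instance/databases/my-db"
    session = sessions.create(database=database, body={}).execute()

    # Begin a Partitioned DML transaction (requires the
    # spanner.databases.beginPartitionedDmlTransaction permission).
    txn = sessions.beginTransaction(
        session=session["name"],
        body={"options": {"partitionedDml": {}}},
    ).execute()

    # Run an idempotent DML statement in that transaction; it may be applied
    # more than once to some partitions, so "column = column + 1"-style
    # updates are unsafe here.
    result = sessions.executeSql(
        session=session["name"],
        body={
            "sql": "UPDATE Albums SET marketing_budget = 0 WHERE marketing_budget IS NULL",
            "transaction": {"id": txn["id"]},
        },
    ).execute()
    print(result.get("stats", {}).get("rowCountLowerBound"))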
"transactionTag": "A String", # A tag used for statistics collection about this transaction. Both request_tag and transaction_tag can be specified for a read or query that belongs to a transaction. The value of transaction_tag should be the same for all requests belonging to the same transaction. If this request doesn’t belong to any transaction, transaction_tag will be ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a transaction_tag is limited to 50 characters. Values that exceed this limit are truncated. }, "returnCommitStats": True or False, # If `true`, then statistics related to the transaction will be included in the CommitResponse. Default value is `false`. - "singleUseTransaction": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. 
Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. 
If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. 
These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute mutations in a temporary transaction. Note that unlike commit of a previously-started transaction, commit with a temporary transaction is non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once (for instance, due to retries in the application, or in the transport library), it is possible that the mutations are executed more than once. If this is undesirable, use BeginTransaction and Commit instead. + "singleUseTransaction": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. 
This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. 
If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. 
- Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute mutations in a temporary transaction. Note that unlike commit of a previously-started transaction, commit with a temporary transaction is non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once (for instance, due to retries in the application, or in the transport library), it is possible that the mutations are executed more than once. If this is undesirable, use BeginTransaction and Commit instead. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource.
@@ -421,14 +421,7 @@ Method Details
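For the `singleUseTransaction`, `mutations`, and `returnCommitStats` fields of the commit request described above, a minimal sketch could look like the following; it reuses the `sessions` resource and `session` from the earlier sketch, and the `Singers` table and its columns are hypothetical. As the docs note, a single-use commit is not idempotent, so applications that retry should prefer beginTransaction followed by commit with the returned transaction id.

    commit_body = {
        "singleUseTransaction": {"readWrite": {}},
        "mutations": [
            {
                "insertOrUpdate": {
                    "table": "Singers",
                    "columns": ["SingerId", "FirstName", "LastName"],
                    # INT64 values travel as decimal strings in the JSON encoding.
                    "values": [["1", "Marc", "Richards"]],
                }
            }
        ],
        "returnCommitStats": True,
    }
    response = sessions.commit(session=session["name"], body=commit_body).execute()
    print(response.get("commitTimestamp"), response.get("commitStats"))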
"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. "code": "A String", # Required. The TypeCode for this type. - "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type` provides type information for the struct's fields. - "fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. - { # Message representing a single field of a struct. - "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. - }, - ], - }, + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. }, }, "params": { # Parameter names and values that bind to placeholders in the DML string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names can contain letters, numbers, and underscores. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `"WHERE id > @msg_id AND id < @msg_id + 100"` It is an error to execute a SQL statement with unbound parameters. @@ -438,7 +431,7 @@Method Details
}, ], "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Required. The transaction to use. Must be a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. 
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. 
Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. 
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. 
For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. 
Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. 
However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -453,7 +446,7 @@Method Details
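[editor's note] A minimal sketch of the Partitioned DML flow described above, using the sessions surface of this client library. The project, instance, database, table names and the SQL are placeholders, and application default credentials are assumed; per the request docs further down, ExecuteSql with Partitioned DML needs an existing Partitioned DML transaction ID obtained from beginTransaction.

    # Illustrative sketch only: run an idempotent partitioned DML statement.
    from googleapiclient.discovery import build

    service = build('spanner', 'v1')  # uses application default credentials
    sessions = service.projects().instances().databases().sessions()

    database = 'projects/my-project/instances/my-instance/databases/my-db'
    session = sessions.create(database=database, body={}).execute()['name']

    # Partitioned DML requires an existing Partitioned DML transaction ID.
    txn = sessions.beginTransaction(
        session=session,
        body={'options': {'partitionedDml': {}}},
    ).execute()

    sessions.executeSql(
        session=session,
        body={
            'transaction': {'id': txn['id']},
            # Idempotent statement: safe if a partition applies it more than once.
            'sql': 'UPDATE Albums SET MarketingBudget = 0 '
                   'WHERE MarketingBudget IS NULL',
            'seqno': '1',  # required for DML statements
        },
    ).execute()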
}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -486,7 +479,11 @@Method Details
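[editor's note] A hedged sketch of the `singleUse` option described above: a temporary, single-use read-only transaction with a bounded-staleness timestamp bound, which (per the text) is the only transaction shape that supports `maxStaleness`. It reuses the `sessions` collection and `session` name from the earlier sketch; the SQL and staleness value are placeholders.

    # Illustrative sketch: single-use read-only query with bounded staleness.
    rows = sessions.executeSql(
        session=session,
        body={
            'transaction': {
                'singleUse': {
                    'readOnly': {
                        'maxStaleness': '15s',        # bounded-staleness bound
                        'returnReadTimestamp': True,  # report the chosen timestamp
                    },
                },
            },
            'sql': 'SELECT SingerId, FirstName FROM Singers',
        },
    ).execute()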
"fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. { # Message representing a single field of a struct. "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. + "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. # The type of the field. + "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. + "code": "A String", # Required. The TypeCode for this type. + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. + }, }, ], }, @@ -563,14 +560,7 @@Method Details
"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. "code": "A String", # Required. The TypeCode for this type. - "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type` provides type information for the struct's fields. - "fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. - { # Message representing a single field of a struct. - "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. - }, - ], - }, + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. }, }, "params": { # Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names must conform to the naming requirements of identifiers as specified at https://cloud.google.com/spanner/docs/lexical#identifiers. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `"WHERE id > @msg_id AND id < @msg_id + 100"` It is an error to execute a SQL statement with unbound parameters. @@ -591,7 +581,7 @@Method Details
"seqno": "A String", # A per-transaction sequence number used to identify this request. This field makes each request idempotent such that if the request is received multiple times, at most one will succeed. The sequence number must be monotonically increasing within the transaction. If a request arrives for the first time with an out-of-order sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution. Required for DML statements. Ignored for queries. "sql": "A String", # Required. The SQL string. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. For queries, if none is provided, the default is a temporary read-only transaction with strong concurrency. Standard DML statements require a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing Partitioned DML transaction ID. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. 
Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. 
## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. 
These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. 
Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. 
Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. 
- The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -606,7 +596,7 @@Method Details
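[editor's note] A hedged sketch of the `begin` option documented above: the first ExecuteSql call starts a read-write transaction, and the new transaction ID comes back in ResultSetMetadata.transaction, which is then passed to Commit. It reuses `sessions` and `session` from the first sketch; the SQL and mutation list are placeholders.

    # Illustrative sketch: begin a read-write transaction on the first statement,
    # then commit with the transaction ID returned in the result metadata.
    first = sessions.executeSql(
        session=session,
        body={
            'transaction': {'begin': {'readWrite': {}}},
            'sql': "UPDATE Singers SET LastName = 'Lee' WHERE SingerId = 1",
            'seqno': '1',  # required for DML statements
        },
    ).execute()

    txn_id = first['metadata']['transaction']['id']
    sessions.commit(
        session=session,
        body={'transactionId': txn_id, 'mutations': []},
    ).execute()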
}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -637,7 +627,11 @@Method Details
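For read-only work, the `singleUse` selector documented above is usually enough: executeSql runs in a temporary transaction with whatever timestamp bound is supplied, so no Commit, Rollback, or retry loop is needed. A minimal sketch, reusing the placeholder `sessions` collection and `session` name from the earlier Partitioned DML example:

```python
# Strong, single-use read-only query: sees everything committed before the read.
strong_result = sessions.executeSql(
    session=session,
    body={
        "sql": "SELECT SingerId, FirstName FROM Singers",
        "transaction": {"singleUse": {"readOnly": {"strong": True}}},
    },
).execute()

# Bounded-staleness variant: lets Cloud Spanner pick a timestamp up to 15 seconds
# old so the read can usually be served by the closest replica without blocking.
stale_result = sessions.executeSql(
    session=session,
    body={
        "sql": "SELECT SingerId, FirstName FROM Singers",
        "transaction": {"singleUse": {"readOnly": {"maxStaleness": "15s"}}},
    },
).execute()
```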
"fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. { # Message representing a single field of a struct. "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. + "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. # The type of the field. + "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. + "code": "A String", # Required. The TypeCode for this type. + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. + }, }, ], }, @@ -703,14 +697,7 @@Method Details
"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. "code": "A String", # Required. The TypeCode for this type. - "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type` provides type information for the struct's fields. - "fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. - { # Message representing a single field of a struct. - "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. - }, - ], - }, + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. }, }, "params": { # Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names must conform to the naming requirements of identifiers as specified at https://cloud.google.com/spanner/docs/lexical#identifiers. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `"WHERE id > @msg_id AND id < @msg_id + 100"` It is an error to execute a SQL statement with unbound parameters. @@ -731,7 +718,7 @@Method Details
"seqno": "A String", # A per-transaction sequence number used to identify this request. This field makes each request idempotent such that if the request is received multiple times, at most one will succeed. The sequence number must be monotonically increasing within the transaction. If a request arrives for the first time with an out-of-order sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution. Required for DML statements. Ignored for queries. "sql": "A String", # Required. The SQL string. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. For queries, if none is provided, the default is a temporary read-only transaction with strong concurrency. Standard DML statements require a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing Partitioned DML transaction ID. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. 
Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. 
## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. 
These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. 
Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. 
Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. 
- The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -746,7 +733,7 @@Method Details
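Combining the `begin` selector with the `seqno` field gives the standard (non-partitioned) DML path: the statement starts a read-write transaction, the new transaction ID comes back in ResultSetMetadata.transaction, and the caller must commit explicitly. A rough sketch under the same placeholder names as above; the ABORTED retry loop discussed earlier is omitted for brevity:

```python
# Begin a read-write transaction as part of the first statement; the new
# transaction ID is returned in ResultSetMetadata.transaction.
update = sessions.executeSql(
    session=session,
    body={
        "sql": "UPDATE Singers SET FirstName = @name WHERE SingerId = @id",
        "params": {"name": "Alice", "id": "42"},
        "paramTypes": {"name": {"code": "STRING"}, "id": {"code": "INT64"}},
        "transaction": {"begin": {"readWrite": {}}},
        "seqno": "1",  # required for DML; makes the request idempotent on replay
    },
).execute()

txn_id = update["metadata"]["transaction"]["id"]

# Nothing is visible to other readers until the transaction commits.
sessions.commit(
    session=session,
    body={"transactionId": txn_id},
).execute()
```

Until the commit succeeds, none of the changes are visible to other transactions, and Cloud Spanner may still abort the transaction, in which case the whole sequence should be retried in the same session.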
}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -778,7 +765,11 @@ Method Details
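The single-use transaction selector documented above is easiest to see in a concrete request body. The following is a rough sketch using the generated Python client: it issues ExecuteSql with a single-use read-only transaction, once with the default strong bound and once with a 15-second exact-staleness bound. The project, instance, database, session, and table names are placeholders, and build() is assumed to pick up Application Default Credentials.

```python
# Illustrative sketch only: resource names, the table, and the queries are
# placeholders, and build() is assumed to use Application Default Credentials.
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()
session = ("projects/my-project/instances/my-instance/"
           "databases/my-db/sessions/my-session")

# Single-use read-only transaction with the default strong timestamp bound.
strong_body = {
    "sql": "SELECT SingerId, FirstName FROM Singers",
    "transaction": {"singleUse": {"readOnly": {"strong": True}}},
}

# Single-use read-only transaction reading at an exact staleness of 15 seconds.
stale_body = {
    "sql": "SELECT SingerId, FirstName FROM Singers",
    "transaction": {"singleUse": {"readOnly": {"exactStaleness": "15s"}}},
}

for body in (strong_body, stale_body):
    result = sessions.executeSql(session=session, body=body).execute()
    print(result.get("rows", []))
```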
"fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. { # Message representing a single field of a struct. "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. + "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. # The type of the field. + "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. + "code": "A String", # Required. The TypeCode for this type. + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. + }, }, ], }, @@ -913,14 +904,7 @@Method Details
"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. "code": "A String", # Required. The TypeCode for this type. - "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type` provides type information for the struct's fields. - "fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. - { # Message representing a single field of a struct. - "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. - }, - ], - }, + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. }, }, "params": { # Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names can contain letters, numbers, and underscores. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `"WHERE id > @msg_id AND id < @msg_id + 100"` It is an error to execute a SQL statement with unbound parameters. @@ -932,7 +916,7 @@Method Details
}, "sql": "A String", # Required. The query request to generate partitions for. The request will fail if the query is not root partitionable. The query plan of a root partitionable query has a single distributed union operator. A distributed union operator conceptually divides one or more tables into multiple splits, remotely evaluates a subquery independently on each split, and then unions all results. This must not contain DML commands, such as INSERT, UPDATE, or DELETE. Use ExecuteStreamingSql with a PartitionedDml transaction for large, partition-friendly DML operations. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Read only snapshot transactions are supported, read/write and single use transactions are not. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. 
## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. 
Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. 
Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. 
Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. 
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -947,7 +931,7 @@ Method Details
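Putting the `begin` selector together with the partition-token flow, a rough sketch with the generated Python client might look like the following. Resource names and the query are placeholders; PartitionQuery only accepts read-only snapshot transactions as documented above, the query must be root partitionable, and the returned tokens are intended to be consumed by ExecuteStreamingSql within the same transaction (over REST the streaming response is assumed to arrive as a list of PartialResultSet chunks).

```python
# Sketch with placeholder resource names: partition a root-partitionable query
# inside a newly begun read-only transaction, then execute each partition with
# ExecuteStreamingSql against the same transaction id.
from googleapiclient.discovery import build

spanner = build("spanner", "v1")
sessions = spanner.projects().instances().databases().sessions()
session = ("projects/my-project/instances/my-instance/"
           "databases/my-db/sessions/my-session")
sql = "SELECT SingerId, FirstName FROM Singers"

partition_resp = sessions.partitionQuery(
    session=session,
    body={
        "sql": sql,
        "transaction": {"begin": {"readOnly": {"strong": True}}},
        "partitionOptions": {"maxPartitions": "10"},
    },
).execute()

txn_id = partition_resp["transaction"]["id"]
for partition in partition_resp.get("partitions", []):
    # Over REST, ExecuteStreamingSql is assumed to return a list of
    # PartialResultSet chunks for each partition token.
    chunks = sessions.executeStreamingSql(
        session=session,
        body={
            "sql": sql,
            "transaction": {"id": txn_id},
            "partitionToken": partition["partitionToken"],
        },
    ).execute()
    for chunk in chunks:
        print(chunk.get("values", []))
```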
}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1029,7 +1013,7 @@ Method Details
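As a rough illustration of the `singleUse` selector documented above, the following minimal sketch (assuming a spanner_v1 client built with googleapiclient.discovery, application default credentials, and a placeholder session name) executes a single query in a temporary, strong read-only transaction:

    from googleapiclient import discovery

    # Build the generated Spanner v1 client (assumes application default credentials).
    service = discovery.build('spanner', 'v1')

    # Placeholder session resource name; in practice this comes from sessions.create.
    session_name = ('projects/my-project/instances/my-instance/'
                    'databases/my-db/sessions/my-session')

    # Execute the query in a temporary, single-use, strong read-only transaction.
    response = service.projects().instances().databases().sessions().executeSql(
        session=session_name,
        body={
            'sql': 'SELECT 1',
            'transaction': {'singleUse': {'readOnly': {'strong': True}}},
        },
    ).execute()

Because the transaction is single-use, no Commit or Rollback call is needed afterwards.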
}, "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Read only snapshot transactions are supported, read/write and single use transactions are not. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. 
## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. 
Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. 
Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. 
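As a sketch of the three modes just listed, the corresponding TransactionOptions payloads would take roughly the following shapes (field names as described in this documentation; the staleness value is only an example):

    # Illustrative TransactionOptions payloads for the three transaction modes.
    locking_read_write = {'readWrite': {}}                        # pessimistic locks, may abort
    snapshot_read_only = {'readOnly': {'exactStaleness': '15s'}}  # reads at a stale timestamp
    partitioned_dml = {'partitionedDml': {}}                      # single partitioned DML statement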
For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. 
Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. 
However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1044,7 +1028,7 @@ Method Details
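The `begin` selector described above returns the id of the newly started transaction in ResultSetMetadata.transaction, which can then be used for further requests and the final Commit. A minimal sketch of that flow, with placeholder resource names:

    from googleapiclient import discovery

    service = discovery.build('spanner', 'v1')
    session_name = 'projects/my-project/instances/my-instance/databases/my-db/sessions/my-session'  # placeholder
    sessions = service.projects().instances().databases().sessions()

    # Begin a read-write transaction implicitly via the 'begin' selector.
    resp = sessions.executeSql(
        session=session_name,
        body={
            'sql': 'SELECT 1',
            'transaction': {'begin': {'readWrite': {}}},
        },
    ).execute()

    # The new transaction id is returned in ResultSetMetadata.transaction.
    txn_id = resp['metadata']['transaction']['id']

    # Commit (or roll back) the transaction explicitly.
    sessions.commit(
        session=session_name,
        body={'transactionId': txn_id},
    ).execute()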
}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
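For reference, the three timestamp bounds discussed above sketch out as `readOnly` options roughly as follows; durations are protobuf-style strings, and any of them can be wrapped in a `singleUse` selector (the bounded-staleness modes are only valid for single-use transactions).

```python
# Strong (the default): observe everything committed before the read started.
strong = {"readOnly": {"strong": True, "returnReadTimestamp": True}}

# Exact staleness: read exactly 10 seconds in the past (or pass an absolute
# "readTimestamp" instead of "exactStaleness").
exact = {"readOnly": {"exactStaleness": "10s"}}

# Bounded staleness: let Cloud Spanner pick the freshest timestamp that is at
# most 15 seconds old and can be served by the closest replica without blocking.
bounded = {"readOnly": {"maxStaleness": "15s"}}

# Example selector for a one-off stale read.
selector = {"singleUse": bounded}
```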
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
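A Partitioned DML sketch under the same assumptions as the earlier snippets (the `sessions` collection, an existing session, and a hypothetical `Events` table); the statement is idempotent, as the caveats above recommend, and whether `seqno` is strictly needed here is an assumption carried over from the DML rules for read-write transactions.

```python
# Begin a Partitioned DML transaction, then run one idempotent statement in it.
pdml = sessions.beginTransaction(
    session=session["name"], body={"options": {"partitionedDml": {}}}
).execute()

result = sessions.executeSql(
    session=session["name"],
    body={
        "sql": "DELETE FROM Events WHERE EventDate < '2020-01-01'",
        "transaction": {"id": pdml["id"]},
        "seqno": "1",  # sequence number for DML statements
    },
).execute()

# The partitions commit automatically; no Commit call is made. Because execution
# is at-least-once per partition, only a lower bound on modified rows is reported.
print(result["stats"]["rowCountLowerBound"])
```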
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1130,7 +1114,7 @@ Method Details
"resumeToken": "A String", # If this request is resuming a previously interrupted read, `resume_token` should be copied from the last PartialResultSet yielded before the interruption. Doing this enables the new read to resume where the last read left off. The rest of the request parameters must exactly match the request that yielded this token. "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. 
If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. 
If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. 
These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. 
Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. 
However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. 
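The `begin` selector that this field documents starts a new transaction as a side effect of the first statement; below is a sketch under the same assumptions as the earlier snippets (hypothetical table names, existing session), where the new transaction id is read back from `ResultSetMetadata.transaction` and reused for Commit.

```python
# Begin a read-write transaction as part of the first DML statement.
first = sessions.executeSql(
    session=session["name"],
    body={
        "sql": "UPDATE Albums SET MarketingBudget = 0 WHERE MarketingBudget IS NULL",
        "transaction": {"begin": {"readWrite": {}}},
        "seqno": "1",  # DML in read-write transactions carries a sequence number
    },
).execute()

# The id of the newly begun transaction comes back in the result metadata.
txn_id = first["metadata"]["transaction"]["id"]

# Later statements would select it with {"id": txn_id}; Commit references it too.
sessions.commit(
    session=session["name"], body={"transactionId": txn_id}
).execute()
```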
In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1145,7 +1129,7 @@ Method Details
}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
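The same single-use, strong read-only selector also applies to the Read API; a small sketch, again with hypothetical table and column names and the `sessions`/`session` objects from the first snippet.

```python
# Read selected columns of every row in a temporary, strong read-only transaction.
rows = sessions.read(
    session=session["name"],
    body={
        "table": "Singers",
        "columns": ["SingerId", "FirstName", "LastName"],
        "keySet": {"all": True},
        "transaction": {"singleUse": {"readOnly": {"strong": True}}},
        "limit": "100",  # int64 fields are passed as strings
    },
).execute()

for row in rows.get("rows", []):
    print(row)
```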
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
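For the idle-transaction behaviour noted above, one possible keep-alive is a periodic trivial query; the sketch below assumes a long-lived read-write transaction id obtained as in the earlier snippets and is only meant to show the `SELECT 1` pattern.

```python
import threading

def keep_transaction_alive(sessions, session_name, txn_id, stop: threading.Event):
    """Run `SELECT 1` every few seconds so a long-lived read-write transaction is
    not considered idle (no reads or queries for ~10 seconds) and aborted."""
    while not stop.is_set():
        sessions.executeSql(
            session=session_name,
            body={"sql": "SELECT 1", "transaction": {"id": txn_id}},
        ).execute()
        stop.wait(5)  # stay well under the ~10 second idle threshold
```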
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1176,7 +1160,11 @@Method Details
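The timestamp-bound options described in the docstring above map directly onto the `TransactionOptions.ReadOnly` fields that the discovery-based Python client sends in an `executeSql` request. Below is a minimal sketch, not an official sample: it assumes default application credentials and an already-created session, the project, instance, database, and session names are placeholders, and only one of `strong`, `exactStaleness`, or `maxStaleness` would be set on a given request.

```python
from googleapiclient import discovery

spanner = discovery.build("spanner", "v1")
session = ("projects/my-project/instances/my-instance/"
           "databases/my-db/sessions/my-session")  # placeholder session name

# Strong (the default): sees every transaction committed before the read starts.
strong_bound = {"readOnly": {"strong": True}}

# Exact staleness: read at a fixed point 10 seconds in the past; no negotiation phase.
exact_bound = {"readOnly": {"exactStaleness": "10s"}}

# Bounded staleness: let Cloud Spanner pick the freshest timestamp within 15 seconds
# that a nearby replica can serve without blocking (single-use read-only only).
bounded_bound = {"readOnly": {"maxStaleness": "15s"}}

resp = (
    spanner.projects().instances().databases().sessions()
    .executeSql(
        session=session,
        body={
            "sql": "SELECT SingerId, FirstName FROM Singers",
            "transaction": {"singleUse": bounded_bound},
        },
    )
    .execute()
)
```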
"fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. { # Message representing a single field of a struct. "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. + "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. # The type of the field. + "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. + "code": "A String", # Required. The TypeCode for this type. + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. + }, }, ], }, @@ -1301,7 +1289,7 @@Method Details
"resumeToken": "A String", # If this request is resuming a previously interrupted read, `resume_token` should be copied from the last PartialResultSet yielded before the interruption. Doing this enables the new read to resume where the last read left off. The rest of the request parameters must exactly match the request that yielded this token. "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency. - "begin": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. 
If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. 
If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. 
These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. 
Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. 
However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. 
In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1316,7 +1304,7 @@Method Details
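The `begin` selector documented above starts a read-write transaction and runs the statement in a single round trip, returning the new transaction ID in `ResultSetMetadata.transaction`. The sketch below follows the retry guidance from the docstring: retry `ABORTED` commits in the same session and cap total wall time rather than the number of attempts. It reuses `spanner` and `session` from the first sketch, and treating HTTP 409 as `ABORTED` is an assumption; a real client should inspect the error payload's status.

```python
import time

from googleapiclient.errors import HttpError

deadline = time.monotonic() + 60  # bound wall time spent retrying, not retry count
sessions = spanner.projects().instances().databases().sessions()
while True:
    try:
        resp = sessions.executeSql(
            session=session,
            body={
                "transaction": {"begin": {"readWrite": {}}},  # begin + execute together
                "sql": "UPDATE Singers SET LastName = @name WHERE SingerId = 1",
                "params": {"name": "Smith"},
                "paramTypes": {"name": {"code": "STRING"}},
                "seqno": 1,  # sequence number required for DML in read-write transactions
            },
        ).execute()
        txn_id = resp["metadata"]["transaction"]["id"]
        sessions.commit(session=session, body={"transactionId": txn_id}).execute()
        break
    except HttpError as err:
        # Assumption: ABORTED surfaces as HTTP 409; check err.content for the status in practice.
        if err.resp.status == 409 and time.monotonic() < deadline:
            continue
        raise
```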
}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # # Transactions Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. # Transaction Modes Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. ## Locking Read-Write Transactions Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. ## Semantics Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. ## Retrying Aborted Transactions When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. ## Idle Transactions A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. ## Snapshot Read-Only Transactions Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. ## Strong Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. ## Exact Staleness These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. ## Bounded Staleness Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. ## Old Read Timestamps and Garbage Collection Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. ## Partitioned DML Transactions Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. 
Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction Modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. This transaction type provides guaranteed consistency across several reads, but does not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past. Snapshot read-only transactions do not need to be committed. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read/write data in a single database. They may, however, read/write data in different tables within that database. Locking Read-Write Transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying Aborted Transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of wall time spent retrying. Idle Transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (e.g., `SELECT 1`) prevents the transaction from becoming idle. Snapshot Read-Only Transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transaction, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. See TransactionOptions.ReadOnly.strong. Exact Staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp <= the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded Staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old Read Timestamps and Garbage Collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. Partitioned DML Transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. 
Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, "readOnly": { # Message type to initiate a read-only transaction. # Transaction will not write. Authorization to begin a read-only transaction requires `spanner.databases.beginReadOnlyTransaction` permission on the `session` resource. @@ -1348,7 +1336,11 @@Method Details
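For the Partitioned DML mode described above, the REST flow is to begin a transaction with `partitionedDml` options and then issue exactly one idempotent DML statement through `executeSql`; there is no Commit or Rollback step. A sketch, again reusing `spanner` and `session` from the first example; the table and statement are placeholders:

```python
sessions = spanner.projects().instances().databases().sessions()

pdml = sessions.beginTransaction(
    session=session, body={"options": {"partitionedDml": {}}}
).execute()

sessions.executeSql(
    session=session,
    body={
        "transaction": {"id": pdml["id"]},
        # Idempotent, fully partitionable statement, as the notes above recommend.
        "sql": "DELETE FROM Events WHERE CreatedAt < '2020-01-01'",
        "seqno": 1,  # sequence number for DML requests
    },
).execute()
# Partitions commit automatically; no commit() call follows.
```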
"fields": [ # The list of fields that make up this struct. Order is significant, because values of this struct type are represented as lists, where the order of field values matches the order of fields in the StructType. In turn, the order of fields matches the order of columns in a read request, or the order of fields in the `SELECT` clause of a query. { # Message representing a single field of a struct. "name": "A String", # The name of the field. For reads, this is the column name. For SQL queries, it is the column alias (e.g., `"Word"` in the query `"SELECT 'hello' AS Word"`), or the column name (e.g., `"ColName"` in the query `"SELECT ColName FROM Table"`). Some columns might have an empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a query result can contain multiple fields with the same name. - "type": # Object with schema name: Type # The type of the field. + "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. # The type of the field. + "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. + "code": "A String", # Required. The TypeCode for this type. + "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type` provides type information for the struct's fields. + }, }, ], }, diff --git a/docs/dyn/spanner_v1.scans.html b/docs/dyn/spanner_v1.scans.html index 61d0572cc50..ac8eee55218 100644 --- a/docs/dyn/spanner_v1.scans.html +++ b/docs/dyn/spanner_v1.scans.html @@ -239,6 +239,7 @@Method Details
"token": "A String", # The token identifying the message, e.g. 'METRIC_READ_CPU'. This should be unique within the service. }, "startKeyIndex": 42, # The index of the start key in indexed_keys. + "timeOffset": "A String", # The time offset. This is the time since the start of the time interval. "unit": { # A message representing a user-facing string whose value may need to be translated before being displayed. # The unit of the metric. This is an unstructured field and will be mapped as is to the user. "args": { # A map of arguments used when creating the localized message. Keys represent parameter names which may be used by the localized version when substituting dynamic values. "a_key": "A String", diff --git a/docs/dyn/storagetransfer_v1.googleServiceAccounts.html b/docs/dyn/storagetransfer_v1.googleServiceAccounts.html index cc40f18fdc5..9c70c101d18 100644 --- a/docs/dyn/storagetransfer_v1.googleServiceAccounts.html +++ b/docs/dyn/storagetransfer_v1.googleServiceAccounts.html @@ -102,6 +102,7 @@Method Details
{ # Google service account "accountEmail": "A String", # Email address of the service account. + "subjectId": "A String", # Unique identifier for the service account. }
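The new `subjectId` field sits alongside `accountEmail` in the Storage Transfer Service's Google-managed service account resource. A minimal sketch of reading both with the discovery client, assuming default credentials; the project ID is a placeholder:

```python
from googleapiclient import discovery

sts = discovery.build("storagetransfer", "v1")
account = sts.googleServiceAccounts().get(projectId="my-project").execute()
print(account.get("accountEmail"), account.get("subjectId"))
```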