From b73cd9485f8713ac42e7efa9bfd952f67368b778 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 30 Aug 2021 20:32:59 -0700 Subject: [PATCH] feat: add prediction service RPC RawPredict to aiplatform_v1beta1 feat: add tensorboard service RPCs to aiplatform_v1beta1: BatchCreateTensorboardRuns, BatchCreateTensorboardTimeSeries, WriteTensorboardExperimentData feat: add model_deployment_monitori... (#670) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add prediction service RPC RawPredict to aiplatform_v1beta1 feat: add tensorboard service RPCs to aiplatform_v1beta1: BatchCreateTensorboardRuns, BatchCreateTensorboardTimeSeries, WriteTensorboardExperimentData feat: add model_deployment_monitoring_job to Endpoint in aiplatform_v1beta1 feat: add deployment_group to DeployedIndex in aiplatform_v1beta1 feat: add ModelEvaluationExplanationSpec in aiplatform_v1beta1 Committer: @dizcology PiperOrigin-RevId: 393890669 Source-Link: https://github.com/googleapis/googleapis/commit/321abab2143fed12030b32b2b20aecdc3a5070ed Source-Link: https://github.com/googleapis/googleapis-gen/commit/629290d281933863e533a31874824ebcb449cccb * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- google/cloud/aiplatform_v1beta1/__init__.py | 14 + .../aiplatform_v1beta1/gapic_metadata.json | 40 + .../services/dataset_service/async_client.py | 4 +- .../services/dataset_service/client.py | 4 +- .../dataset_service/transports/grpc.py | 3 + .../transports/grpc_asyncio.py | 3 + .../services/endpoint_service/async_client.py | 8 +- .../services/endpoint_service/client.py | 22 +- .../endpoint_service/transports/grpc.py | 2 + .../transports/grpc_asyncio.py | 2 + .../services/metadata_service/async_client.py | 9 +- .../services/metadata_service/client.py | 9 +- .../metadata_service/transports/grpc.py | 3 +- 
.../transports/grpc_asyncio.py | 3 +- .../services/migration_service/client.py | 22 +- .../prediction_service/async_client.py | 147 +++ .../services/prediction_service/client.py | 147 +++ .../prediction_service/transports/base.py | 13 + .../prediction_service/transports/grpc.py | 28 + .../transports/grpc_asyncio.py | 30 + .../specialist_pool_service/async_client.py | 41 +- .../specialist_pool_service/client.py | 41 +- .../tensorboard_service/async_client.py | 280 +++++- .../services/tensorboard_service/client.py | 294 +++++- .../tensorboard_service/transports/base.py | 51 ++ .../tensorboard_service/transports/grpc.py | 98 ++ .../transports/grpc_asyncio.py | 98 ++ .../aiplatform_v1beta1/types/__init__.py | 14 + .../aiplatform_v1beta1/types/artifact.py | 1 + .../cloud/aiplatform_v1beta1/types/context.py | 1 + .../aiplatform_v1beta1/types/custom_job.py | 35 +- .../cloud/aiplatform_v1beta1/types/dataset.py | 3 + .../aiplatform_v1beta1/types/endpoint.py | 8 +- .../aiplatform_v1beta1/types/execution.py | 1 + .../aiplatform_v1beta1/types/explanation.py | 2 +- .../types/explanation_metadata.py | 26 +- .../aiplatform_v1beta1/types/featurestore.py | 8 +- .../types/index_endpoint.py | 17 + .../aiplatform_v1beta1/types/index_service.py | 3 + .../types/metadata_service.py | 26 +- .../types/migration_service.py | 2 +- .../cloud/aiplatform_v1beta1/types/model.py | 8 +- .../types/model_deployment_monitoring_job.py | 15 + .../types/pipeline_service.py | 69 +- .../types/prediction_service.py | 38 + .../types/specialist_pool.py | 17 +- .../cloud/aiplatform_v1beta1/types/study.py | 22 +- .../types/tensorboard_run.py | 20 + .../types/tensorboard_service.py | 123 ++- .../test_dataset_service.py | 8 + .../test_endpoint_service.py | 80 +- .../test_migration_service.py | 40 +- .../test_prediction_service.py | 225 +++++ .../test_tensorboard_service.py | 853 +++++++++++++++++- 54 files changed, 2839 insertions(+), 242 deletions(-) diff --git a/google/cloud/aiplatform_v1beta1/__init__.py 
b/google/cloud/aiplatform_v1beta1/__init__.py index 6481a4c0bd..156091fa24 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -376,6 +376,7 @@ from .types.prediction_service import ExplainResponse from .types.prediction_service import PredictRequest from .types.prediction_service import PredictResponse +from .types.prediction_service import RawPredictRequest from .types.specialist_pool import SpecialistPool from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata from .types.specialist_pool_service import CreateSpecialistPoolRequest @@ -398,6 +399,10 @@ from .types.tensorboard_data import TimeSeriesDataPoint from .types.tensorboard_experiment import TensorboardExperiment from .types.tensorboard_run import TensorboardRun +from .types.tensorboard_service import BatchCreateTensorboardRunsRequest +from .types.tensorboard_service import BatchCreateTensorboardRunsResponse +from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesRequest +from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesResponse from .types.tensorboard_service import CreateTensorboardExperimentRequest from .types.tensorboard_service import CreateTensorboardOperationMetadata from .types.tensorboard_service import CreateTensorboardRequest @@ -430,6 +435,8 @@ from .types.tensorboard_service import UpdateTensorboardRequest from .types.tensorboard_service import UpdateTensorboardRunRequest from .types.tensorboard_service import UpdateTensorboardTimeSeriesRequest +from .types.tensorboard_service import WriteTensorboardExperimentDataRequest +from .types.tensorboard_service import WriteTensorboardExperimentDataResponse from .types.tensorboard_service import WriteTensorboardRunDataRequest from .types.tensorboard_service import WriteTensorboardRunDataResponse from .types.tensorboard_time_series import TensorboardTimeSeries @@ -503,6 +510,10 @@ "BatchCreateFeaturesOperationMetadata", 
"BatchCreateFeaturesRequest", "BatchCreateFeaturesResponse", + "BatchCreateTensorboardRunsRequest", + "BatchCreateTensorboardRunsResponse", + "BatchCreateTensorboardTimeSeriesRequest", + "BatchCreateTensorboardTimeSeriesResponse", "BatchDedicatedResources", "BatchMigrateResourcesOperationMetadata", "BatchMigrateResourcesRequest", @@ -825,6 +836,7 @@ "QueryArtifactLineageSubgraphRequest", "QueryContextLineageSubgraphRequest", "QueryExecutionInputsAndOutputsRequest", + "RawPredictRequest", "ReadFeatureValuesRequest", "ReadFeatureValuesResponse", "ReadTensorboardBlobDataRequest", @@ -906,6 +918,8 @@ "Value", "VizierServiceClient", "WorkerPoolSpec", + "WriteTensorboardExperimentDataRequest", + "WriteTensorboardExperimentDataResponse", "WriteTensorboardRunDataRequest", "WriteTensorboardRunDataResponse", "XraiAttribution", diff --git a/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/google/cloud/aiplatform_v1beta1/gapic_metadata.json index 55544714a5..deca198f33 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -1493,6 +1493,11 @@ "methods": [ "predict" ] + }, + "RawPredict": { + "methods": [ + "raw_predict" + ] } } }, @@ -1508,6 +1513,11 @@ "methods": [ "predict" ] + }, + "RawPredict": { + "methods": [ + "raw_predict" + ] } } } @@ -1582,6 +1592,16 @@ "grpc": { "libraryClient": "TensorboardServiceClient", "rpcs": { + "BatchCreateTensorboardRuns": { + "methods": [ + "batch_create_tensorboard_runs" + ] + }, + "BatchCreateTensorboardTimeSeries": { + "methods": [ + "batch_create_tensorboard_time_series" + ] + }, "CreateTensorboard": { "methods": [ "create_tensorboard" @@ -1697,6 +1717,11 @@ "update_tensorboard_time_series" ] }, + "WriteTensorboardExperimentData": { + "methods": [ + "write_tensorboard_experiment_data" + ] + }, "WriteTensorboardRunData": { "methods": [ "write_tensorboard_run_data" @@ -1707,6 +1732,16 @@ "grpc-async": { "libraryClient": "TensorboardServiceAsyncClient", "rpcs": { + 
"BatchCreateTensorboardRuns": { + "methods": [ + "batch_create_tensorboard_runs" + ] + }, + "BatchCreateTensorboardTimeSeries": { + "methods": [ + "batch_create_tensorboard_time_series" + ] + }, "CreateTensorboard": { "methods": [ "create_tensorboard" @@ -1822,6 +1857,11 @@ "update_tensorboard_time_series" ] }, + "WriteTensorboardExperimentData": { + "methods": [ + "write_tensorboard_experiment_data" + ] + }, "WriteTensorboardRunData": { "methods": [ "write_tensorboard_run_data" diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index 39119b99fd..9cc97d8722 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -47,7 +47,9 @@ class DatasetServiceAsyncClient: - """""" + """The service that handles the CRUD of Vertex AI Dataset and + its child resources. + """ _client: DatasetServiceClient diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py index a03c0fd0b6..25a2c78ae4 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -84,7 +84,9 @@ def get_transport_class(cls, label: str = None,) -> Type[DatasetServiceTransport class DatasetServiceClient(metaclass=DatasetServiceClientMeta): - """""" + """The service that handles the CRUD of Vertex AI Dataset and + its child resources. 
+ """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py index 1f3a91100f..ccdc689d25 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py @@ -36,6 +36,9 @@ class DatasetServiceGrpcTransport(DatasetServiceTransport): """gRPC backend transport for DatasetService. + The service that handles the CRUD of Vertex AI Dataset and + its child resources. + This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py index 4829c85e90..a11f9e1062 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py @@ -38,6 +38,9 @@ class DatasetServiceGrpcAsyncIOTransport(DatasetServiceTransport): """gRPC AsyncIO backend transport for DatasetService. + The service that handles the CRUD of Vertex AI Dataset and + its child resources. + This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. 
diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index 1daadb7439..4a16494322 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -43,7 +43,7 @@ class EndpointServiceAsyncClient: - """""" + """A service for managing Vertex AI's Endpoints.""" _client: EndpointServiceClient @@ -54,6 +54,12 @@ class EndpointServiceAsyncClient: parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path) model_path = staticmethod(EndpointServiceClient.model_path) parse_model_path = staticmethod(EndpointServiceClient.parse_model_path) + model_deployment_monitoring_job_path = staticmethod( + EndpointServiceClient.model_deployment_monitoring_job_path + ) + parse_model_deployment_monitoring_job_path = staticmethod( + EndpointServiceClient.parse_model_deployment_monitoring_job_path + ) network_path = staticmethod(EndpointServiceClient.network_path) parse_network_path = staticmethod(EndpointServiceClient.parse_network_path) common_billing_account_path = staticmethod( diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py index d7660217b8..c165f350a4 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -80,7 +80,7 @@ def get_transport_class(cls, label: str = None,) -> Type[EndpointServiceTranspor class EndpointServiceClient(metaclass=EndpointServiceClientMeta): - """""" + """A service for managing Vertex AI's Endpoints.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): @@ -196,6 +196,26 @@ def parse_model_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def model_deployment_monitoring_job_path( + 
project: str, location: str, model_deployment_monitoring_job: str, + ) -> str: + """Returns a fully-qualified model_deployment_monitoring_job string.""" + return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format( + project=project, + location=location, + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + @staticmethod + def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str, str]: + """Parses a model_deployment_monitoring_job path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/modelDeploymentMonitoringJobs/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def network_path(project: str, network: str,) -> str: """Returns a fully-qualified network string.""" diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py index 8a5a60b754..876c624304 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py @@ -35,6 +35,8 @@ class EndpointServiceGrpcTransport(EndpointServiceTransport): """gRPC backend transport for EndpointService. + A service for managing Vertex AI's Endpoints. + This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. 
diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py index 4c8f08d88c..899b7af023 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py @@ -37,6 +37,8 @@ class EndpointServiceGrpcAsyncIOTransport(EndpointServiceTransport): """gRPC AsyncIO backend transport for EndpointService. + A service for managing Vertex AI's Endpoints. + This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py index 8a1f529dfc..fbea47fb31 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py @@ -468,7 +468,8 @@ async def delete_metadata_store( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Deletes a single MetadataStore. + r"""Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). Args: request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest`): @@ -830,6 +831,8 @@ async def update_artifact( update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. A FieldMask indicating which fields should be updated. + Functionality of this field is not yet + supported. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1339,6 +1342,8 @@ async def update_context( update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. 
A FieldMask indicating which fields should be updated. + Functionality of this field is not yet + supported. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2117,6 +2122,8 @@ async def update_execution( update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. A FieldMask indicating which fields should be updated. + Functionality of this field is not yet + supported. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py index 1b813d80a8..8fd792f79e 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py @@ -729,7 +729,8 @@ def delete_metadata_store( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gac_operation.Operation: - r"""Deletes a single MetadataStore. + r"""Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). Args: request (google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest): @@ -1091,6 +1092,8 @@ def update_artifact( update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A FieldMask indicating which fields should be updated. + Functionality of this field is not yet + supported. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1600,6 +1603,8 @@ def update_context( update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A FieldMask indicating which fields should be updated. + Functionality of this field is not yet + supported. 
This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2384,6 +2389,8 @@ def update_execution( update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A FieldMask indicating which fields should be updated. + Functionality of this field is not yet + supported. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py index bf2c14eb95..616e305415 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py @@ -346,7 +346,8 @@ def delete_metadata_store( ]: r"""Return a callable for the delete metadata store method over gRPC. - Deletes a single MetadataStore. + Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). Returns: Callable[[~.DeleteMetadataStoreRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py index a8b7e908bb..b8d93250d8 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py @@ -354,7 +354,8 @@ def delete_metadata_store( ]: r"""Return a callable for the delete metadata store method over gRPC. - Deletes a single MetadataStore. + Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). 
Returns: Callable[[~.DeleteMetadataStoreRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index 874e62b317..4810778f39 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -179,16 +179,19 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) -> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod @@ -208,19 +211,16 @@ def parse_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = 
re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index 0ea7d71bac..af215634e9 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -26,8 +26,10 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.protobuf import any_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport @@ -267,6 +269,151 @@ async def predict( # Done; return the response. return response + async def raw_predict( + self, + request: prediction_service.RawPredictRequest = None, + *, + endpoint: str = None, + http_body: httpbody_pb2.HttpBody = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> httpbody_pb2.HttpBody: + r"""Perform an online prediction with arbitrary http + payload. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.RawPredictRequest`): + The request object. Request message for + [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict]. + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the prediction. 
Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + http_body (:class:`google.api.httpbody_pb2.HttpBody`): + The prediction input. Supports HTTP headers and + arbitrary data payload. + + A + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + may have an upper limit on the number of instances it + supports per request. When this limit it is exceeded for + an AutoML model, the + [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] + method returns an error. When this limit is exceeded for + a custom-trained model, the behavior varies depending on + the model. + + You can specify the schema for each instance in the + [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + field when you create a + [Model][google.cloud.aiplatform.v1beta1.Model]. This + schema applies when you deploy the ``Model`` as a + ``DeployedModel`` to an + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and + use the ``RawPredict`` method. + + This corresponds to the ``http_body`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api.httpbody_pb2.HttpBody: + Message that represents an arbitrary HTTP body. It should only be used for + payload formats that can't be represented as JSON, + such as raw binary or an HTML page. + + This message can be used both in streaming and + non-streaming API methods in the request as well as + the response. 
+ + It can be used as a top-level request field, which is + convenient if one wants to extract parameters from + either the URL or HTTP template into the request + fields and also want access to the raw HTTP body. + + Example: + + message GetResourceRequest { + // A unique request id. string request_id = 1; + + // The raw HTTP body is bound to this field. + google.api.HttpBody http_body = 2; + + } + + service ResourceService { + rpc GetResource(GetResourceRequest) returns + (google.api.HttpBody); rpc + UpdateResource(google.api.HttpBody) returns + (google.protobuf.Empty); + + } + + Example with streaming methods: + + service CaldavService { + rpc GetCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + rpc UpdateCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + } + + Use of this type only changes how the request and + response bodies are handled, all other features will + continue to work unchanged. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, http_body]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = prediction_service.RawPredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if http_body is not None: + request.http_body = http_body + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.raw_predict, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + async def explain( self, request: prediction_service.ExplainRequest = None, diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index 7ef1133b40..7e7104474c 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -30,8 +30,10 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.protobuf import any_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import PredictionServiceGrpcTransport @@ -458,6 +460,151 @@ def predict( # Done; return the response. return response + def raw_predict( + self, + request: prediction_service.RawPredictRequest = None, + *, + endpoint: str = None, + http_body: httpbody_pb2.HttpBody = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> httpbody_pb2.HttpBody: + r"""Perform an online prediction with arbitrary http + payload. + + Args: + request (google.cloud.aiplatform_v1beta1.types.RawPredictRequest): + The request object. Request message for + [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict]. + endpoint (str): + Required. 
The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + http_body (google.api.httpbody_pb2.HttpBody): + The prediction input. Supports HTTP headers and + arbitrary data payload. + + A + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + may have an upper limit on the number of instances it + supports per request. When this limit it is exceeded for + an AutoML model, the + [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] + method returns an error. When this limit is exceeded for + a custom-trained model, the behavior varies depending on + the model. + + You can specify the schema for each instance in the + [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + field when you create a + [Model][google.cloud.aiplatform.v1beta1.Model]. This + schema applies when you deploy the ``Model`` as a + ``DeployedModel`` to an + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and + use the ``RawPredict`` method. + + This corresponds to the ``http_body`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api.httpbody_pb2.HttpBody: + Message that represents an arbitrary HTTP body. It should only be used for + payload formats that can't be represented as JSON, + such as raw binary or an HTML page. + + This message can be used both in streaming and + non-streaming API methods in the request as well as + the response. 
+ + It can be used as a top-level request field, which is + convenient if one wants to extract parameters from + either the URL or HTTP template into the request + fields and also want access to the raw HTTP body. + + Example: + + message GetResourceRequest { + // A unique request id. string request_id = 1; + + // The raw HTTP body is bound to this field. + google.api.HttpBody http_body = 2; + + } + + service ResourceService { + rpc GetResource(GetResourceRequest) returns + (google.api.HttpBody); rpc + UpdateResource(google.api.HttpBody) returns + (google.protobuf.Empty); + + } + + Example with streaming methods: + + service CaldavService { + rpc GetCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + rpc UpdateCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + } + + Use of this type only changes how the request and + response bodies are handled, all other features will + continue to work unchanged. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, http_body]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.RawPredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.RawPredictRequest): + request = prediction_service.RawPredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if endpoint is not None: + request.endpoint = endpoint + if http_body is not None: + request.http_body = http_body + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.raw_predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + def explain( self, request: prediction_service.ExplainRequest = None, diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py index ad5ea5f387..535940029d 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -26,6 +26,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1beta1.types import prediction_service try: @@ -157,6 +158,9 @@ def _prep_wrapped_messages(self, client_info): self.predict: gapic_v1.method.wrap_method( self.predict, default_timeout=5.0, client_info=client_info, ), + self.raw_predict: gapic_v1.method.wrap_method( + self.raw_predict, default_timeout=None, client_info=client_info, + ), self.explain: gapic_v1.method.wrap_method( self.explain, default_timeout=5.0, client_info=client_info, ), @@ -174,6 +178,15 @@ def predict( ]: raise NotImplementedError() + @property + def raw_predict( + self, + ) -> Callable[ + [prediction_service.RawPredictRequest], + Union[httpbody_pb2.HttpBody, Awaitable[httpbody_pb2.HttpBody]], + ]: + 
raise NotImplementedError() + @property def explain( self, diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index cab375b06b..0914e151f7 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -24,6 +24,7 @@ import grpc # type: ignore +from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1beta1.types import prediction_service from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO @@ -253,6 +254,33 @@ def predict( ) return self._stubs["predict"] + @property + def raw_predict( + self, + ) -> Callable[[prediction_service.RawPredictRequest], httpbody_pb2.HttpBody]: + r"""Return a callable for the raw predict method over gRPC. + + Perform an online prediction with arbitrary http + payload. + + Returns: + Callable[[~.RawPredictRequest], + ~.HttpBody]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "raw_predict" not in self._stubs: + self._stubs["raw_predict"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PredictionService/RawPredict", + request_serializer=prediction_service.RawPredictRequest.serialize, + response_deserializer=httpbody_pb2.HttpBody.FromString, + ) + return self._stubs["raw_predict"] + @property def explain( self, diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py index a2e08aa417..ea9ba4af2d 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -25,6 +25,7 @@ import grpc # type: ignore from grpc.experimental import aio # type: ignore +from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1beta1.types import prediction_service from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO from .grpc import PredictionServiceGrpcTransport @@ -257,6 +258,35 @@ def predict( ) return self._stubs["predict"] + @property + def raw_predict( + self, + ) -> Callable[ + [prediction_service.RawPredictRequest], Awaitable[httpbody_pb2.HttpBody] + ]: + r"""Return a callable for the raw predict method over gRPC. + + Perform an online prediction with arbitrary http + payload. + + Returns: + Callable[[~.RawPredictRequest], + Awaitable[~.HttpBody]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "raw_predict" not in self._stubs: + self._stubs["raw_predict"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PredictionService/RawPredict", + request_serializer=prediction_service.RawPredictRequest.serialize, + response_deserializer=httpbody_pb2.HttpBody.FromString, + ) + return self._stubs["raw_predict"] + @property def explain( self, diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py index 277938e3cd..55b7b8302f 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -224,12 +224,12 @@ async def create_specialist_pool( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. """ # Create or coerce a protobuf request object. @@ -313,14 +313,15 @@ async def get_specialist_pool( SpecialistPool represents customers' own workforce to work on their data labeling jobs. 
It includes a group of - specialist managers who are responsible - for managing the labelers in this pool - as well as customers' data labeling jobs - associated with this pool. - Customers create specialist pool as well - as start data labeling jobs on Cloud, - managers and labelers work with the jobs - using CrowdCompute console. + specialist managers and workers. + Managers are responsible for managing + the workers in this pool as well as + customers' data labeling jobs associated + with this pool. Customers create + specialist pool as well as start data + labeling jobs on Cloud, managers and + workers handle the jobs using + CrowdCompute console. """ # Create or coerce a protobuf request object. @@ -577,12 +578,12 @@ async def update_specialist_pool( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. """ # Create or coerce a protobuf request object. 
diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py index 31cb4142ee..cb27dfbf9b 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py @@ -408,12 +408,12 @@ def create_specialist_pool( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. """ # Create or coerce a protobuf request object. @@ -497,14 +497,15 @@ def get_specialist_pool( SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of - specialist managers who are responsible - for managing the labelers in this pool - as well as customers' data labeling jobs - associated with this pool. - Customers create specialist pool as well - as start data labeling jobs on Cloud, - managers and labelers work with the jobs - using CrowdCompute console. + specialist managers and workers. + Managers are responsible for managing + the workers in this pool as well as + customers' data labeling jobs associated + with this pool. 
Customers create + specialist pool as well as start data + labeling jobs on Cloud, managers and + workers handle the jobs using + CrowdCompute console. """ # Create or coerce a protobuf request object. @@ -761,12 +762,12 @@ def update_specialist_pool( The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py index 692a769805..34a7fb7b26 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -484,9 +484,9 @@ async def list_tensorboards( The request object. Request message for [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. parent (:class:`str`): - Required. The resource name of the - Location to list Tensorboards. Format: - 'projects/{project}/locations/{location}' + Required. The resource name of the Location to list + Tensorboards. 
Format: + ``projects/{project}/locations/{location}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1099,8 +1099,8 @@ async def create_tensorboard_run( The request object. Request message for [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. parent (:class:`str`): - Required. The resource name of the Tensorboard to create - the TensorboardRun in. Format: + Required. The resource name of the TensorboardExperiment + to create the TensorboardRun in. Format: ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` This corresponds to the ``parent`` field @@ -1179,6 +1179,93 @@ async def create_tensorboard_run( # Done; return the response. return response + async def batch_create_tensorboard_runs( + self, + request: tensorboard_service.BatchCreateTensorboardRunsRequest = None, + *, + parent: str = None, + requests: Sequence[tensorboard_service.CreateTensorboardRunRequest] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardRunsResponse: + r"""Batch create TensorboardRuns. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest`): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + parent (:class:`str`): + Required. The resource name of the TensorboardExperiment + to create the TensorboardRuns in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The parent field in the CreateTensorboardRunRequest + messages must match this field. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]`): + Required. The request message + specifying the TensorboardRuns to + create. A maximum of 1000 + TensorboardRuns can be created in a + batch. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse: + Response message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.BatchCreateTensorboardRunsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_tensorboard_runs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + async def get_tensorboard_run( self, request: tensorboard_service.GetTensorboardRunRequest = None, @@ -1361,8 +1448,8 @@ async def list_tensorboard_runs( [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. parent (:class:`str`): Required. The resource name of the - Tensorboard to list TensorboardRuns. - Format: + TensorboardExperiment to list + TensorboardRuns. Format: 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' This corresponds to the ``parent`` field @@ -1519,6 +1606,97 @@ async def delete_tensorboard_run( # Done; return the response. return response + async def batch_create_tensorboard_time_series( + self, + request: tensorboard_service.BatchCreateTensorboardTimeSeriesRequest = None, + *, + parent: str = None, + requests: Sequence[ + tensorboard_service.CreateTensorboardTimeSeriesRequest + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse: + r"""Batch create TensorboardTimeSeries that belong to a + TensorboardExperiment. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest`): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. + parent (:class:`str`): + Required. The resource name of the TensorboardExperiment + to create the TensorboardTimeSeries in. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The TensorboardRuns referenced by the parent fields in + the CreateTensorboardTimeSeriesRequest messages must be + sub resources of this TensorboardExperiment. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]`): + Required. The request message + specifying the TensorboardTimeSeries to + create. A maximum of 1000 + TensorboardTimeSeries can be created in + a batch. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse: + Response message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + async def create_tensorboard_time_series( self, request: tensorboard_service.CreateTensorboardTimeSeriesRequest = None, @@ -2099,6 +2277,94 @@ def read_tensorboard_blob_data( # Done; return the response. return response + async def write_tensorboard_experiment_data( + self, + request: tensorboard_service.WriteTensorboardExperimentDataRequest = None, + *, + tensorboard_experiment: str = None, + write_run_data_requests: Sequence[ + tensorboard_service.WriteTensorboardRunDataRequest + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardExperimentDataResponse: + r"""Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest`): + The request object. Request message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + tensorboard_experiment (:class:`str`): + Required. The resource name of the TensorboardExperiment + to write data to. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + write_run_data_requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]`): + Required. Requests containing per-run + TensorboardTimeSeries data to write. + + This corresponds to the ``write_run_data_requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse: + Response message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, write_run_data_requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = tensorboard_service.WriteTensorboardExperimentDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if write_run_data_requests: + request.write_run_data_requests.extend(write_run_data_requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.write_tensorboard_experiment_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_experiment", request.tensorboard_experiment),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + async def write_tensorboard_run_data( self, request: tensorboard_service.WriteTensorboardRunDataRequest = None, diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py index 80cd787912..57e9dd36dd 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py @@ -727,9 +727,9 @@ def list_tensorboards( The request object. Request message for [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. parent (str): - Required. The resource name of the - Location to list Tensorboards. Format: - 'projects/{project}/locations/{location}' + Required. The resource name of the Location to list + Tensorboards. Format: + ``projects/{project}/locations/{location}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1360,8 +1360,8 @@ def create_tensorboard_run( The request object. 
Request message for [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. parent (str): - Required. The resource name of the Tensorboard to create - the TensorboardRun in. Format: + Required. The resource name of the TensorboardExperiment + to create the TensorboardRun in. Format: ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` This corresponds to the ``parent`` field @@ -1440,6 +1440,97 @@ def create_tensorboard_run( # Done; return the response. return response + def batch_create_tensorboard_runs( + self, + request: tensorboard_service.BatchCreateTensorboardRunsRequest = None, + *, + parent: str = None, + requests: Sequence[tensorboard_service.CreateTensorboardRunRequest] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardRunsResponse: + r"""Batch create TensorboardRuns. + + Args: + request (google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + parent (str): + Required. The resource name of the TensorboardExperiment + to create the TensorboardRuns in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The parent field in the CreateTensorboardRunRequest + messages must match this field. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]): + Required. The request message + specifying the TensorboardRuns to + create. A maximum of 1000 + TensorboardRuns can be created in a + batch. 
+ + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse: + Response message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.BatchCreateTensorboardRunsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.BatchCreateTensorboardRunsRequest + ): + request = tensorboard_service.BatchCreateTensorboardRunsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.batch_create_tensorboard_runs + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + def get_tensorboard_run( self, request: tensorboard_service.GetTensorboardRunRequest = None, @@ -1622,8 +1713,8 @@ def list_tensorboard_runs( [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. parent (str): Required. The resource name of the - Tensorboard to list TensorboardRuns. - Format: + TensorboardExperiment to list + TensorboardRuns. Format: 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' This corresponds to the ``parent`` field @@ -1780,6 +1871,103 @@ def delete_tensorboard_run( # Done; return the response. return response + def batch_create_tensorboard_time_series( + self, + request: tensorboard_service.BatchCreateTensorboardTimeSeriesRequest = None, + *, + parent: str = None, + requests: Sequence[ + tensorboard_service.CreateTensorboardTimeSeriesRequest + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse: + r"""Batch create TensorboardTimeSeries that belong to a + TensorboardExperiment. + + Args: + request (google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. + parent (str): + Required. The resource name of the TensorboardExperiment + to create the TensorboardTimeSeries in. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The TensorboardRuns referenced by the parent fields in + the CreateTensorboardTimeSeriesRequest messages must be + sub resources of this TensorboardExperiment. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]): + Required. The request message + specifying the TensorboardTimeSeries to + create. A maximum of 1000 + TensorboardTimeSeries can be created in + a batch. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse: + Response message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.BatchCreateTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance( + request, tensorboard_service.BatchCreateTensorboardTimeSeriesRequest + ): + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.batch_create_tensorboard_time_series + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + def create_tensorboard_time_series( self, request: tensorboard_service.CreateTensorboardTimeSeriesRequest = None, @@ -2384,6 +2572,98 @@ def read_tensorboard_blob_data( # Done; return the response. return response + def write_tensorboard_experiment_data( + self, + request: tensorboard_service.WriteTensorboardExperimentDataRequest = None, + *, + tensorboard_experiment: str = None, + write_run_data_requests: Sequence[ + tensorboard_service.WriteTensorboardRunDataRequest + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardExperimentDataResponse: + r"""Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + Args: + request (google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest): + The request object. 
Request message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + tensorboard_experiment (str): + Required. The resource name of the TensorboardExperiment + to write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + write_run_data_requests (Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]): + Required. Requests containing per-run + TensorboardTimeSeries data to write. + + This corresponds to the ``write_run_data_requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse: + Response message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, write_run_data_requests]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.WriteTensorboardExperimentDataRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, tensorboard_service.WriteTensorboardExperimentDataRequest + ): + request = tensorboard_service.WriteTensorboardExperimentDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if write_run_data_requests is not None: + request.write_run_data_requests = write_run_data_requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.write_tensorboard_experiment_data + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("tensorboard_experiment", request.tensorboard_experiment),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + def write_tensorboard_run_data( self, request: tensorboard_service.WriteTensorboardRunDataRequest = None, diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py index 68b3f921a9..467bfa48fb 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py @@ -212,6 +212,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.batch_create_tensorboard_runs: gapic_v1.method.wrap_method( + self.batch_create_tensorboard_runs, + default_timeout=None, + client_info=client_info, + ), self.get_tensorboard_run: gapic_v1.method.wrap_method( self.get_tensorboard_run, default_timeout=None, client_info=client_info, ), @@ -230,6 +235,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.batch_create_tensorboard_time_series: gapic_v1.method.wrap_method( + self.batch_create_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), self.create_tensorboard_time_series: gapic_v1.method.wrap_method( self.create_tensorboard_time_series, default_timeout=None, @@ -265,6 +275,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.write_tensorboard_experiment_data: gapic_v1.method.wrap_method( + self.write_tensorboard_experiment_data, + default_timeout=None, + client_info=client_info, + ), self.write_tensorboard_run_data: gapic_v1.method.wrap_method( self.write_tensorboard_run_data, default_timeout=None, @@ -399,6 +414,18 @@ def create_tensorboard_run( ]: raise NotImplementedError() + @property + def batch_create_tensorboard_runs( + self, + ) -> Callable[ + [tensorboard_service.BatchCreateTensorboardRunsRequest], + Union[ + 
tensorboard_service.BatchCreateTensorboardRunsResponse, + Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse], + ], + ]: + raise NotImplementedError() + @property def get_tensorboard_run( self, @@ -443,6 +470,18 @@ def delete_tensorboard_run( ]: raise NotImplementedError() + @property + def batch_create_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], + Union[ + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse, + Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse], + ], + ]: + raise NotImplementedError() + @property def create_tensorboard_time_series( self, @@ -524,6 +563,18 @@ def read_tensorboard_blob_data( ]: raise NotImplementedError() + @property + def write_tensorboard_experiment_data( + self, + ) -> Callable[ + [tensorboard_service.WriteTensorboardExperimentDataRequest], + Union[ + tensorboard_service.WriteTensorboardExperimentDataResponse, + Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse], + ], + ]: + raise NotImplementedError() + @property def write_tensorboard_run_data( self, diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py index 6cfbc981b4..4e65055f47 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py @@ -572,6 +572,37 @@ def create_tensorboard_run( ) return self._stubs["create_tensorboard_run"] + @property + def batch_create_tensorboard_runs( + self, + ) -> Callable[ + [tensorboard_service.BatchCreateTensorboardRunsRequest], + tensorboard_service.BatchCreateTensorboardRunsResponse, + ]: + r"""Return a callable for the batch create tensorboard runs method over gRPC. + + Batch create TensorboardRuns. 
+ + Returns: + Callable[[~.BatchCreateTensorboardRunsRequest], + ~.BatchCreateTensorboardRunsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_create_tensorboard_runs" not in self._stubs: + self._stubs[ + "batch_create_tensorboard_runs" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardRuns", + request_serializer=tensorboard_service.BatchCreateTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.BatchCreateTensorboardRunsResponse.deserialize, + ) + return self._stubs["batch_create_tensorboard_runs"] + @property def get_tensorboard_run( self, @@ -686,6 +717,39 @@ def delete_tensorboard_run( ) return self._stubs["delete_tensorboard_run"] + @property + def batch_create_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse, + ]: + r"""Return a callable for the batch create tensorboard time + series method over gRPC. + + Batch create TensorboardTimeSeries that belong to a + TensorboardExperiment. + + Returns: + Callable[[~.BatchCreateTensorboardTimeSeriesRequest], + ~.BatchCreateTensorboardTimeSeriesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "batch_create_tensorboard_time_series" not in self._stubs: + self._stubs[ + "batch_create_tensorboard_time_series" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardTimeSeries", + request_serializer=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.BatchCreateTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs["batch_create_tensorboard_time_series"] + @property def create_tensorboard_time_series( self, @@ -906,6 +970,40 @@ def read_tensorboard_blob_data( ) return self._stubs["read_tensorboard_blob_data"] + @property + def write_tensorboard_experiment_data( + self, + ) -> Callable[ + [tensorboard_service.WriteTensorboardExperimentDataRequest], + tensorboard_service.WriteTensorboardExperimentDataResponse, + ]: + r"""Return a callable for the write tensorboard experiment + data method over gRPC. + + Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardExperimentDataRequest], + ~.WriteTensorboardExperimentDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "write_tensorboard_experiment_data" not in self._stubs: + self._stubs[ + "write_tensorboard_experiment_data" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardExperimentData", + request_serializer=tensorboard_service.WriteTensorboardExperimentDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardExperimentDataResponse.deserialize, + ) + return self._stubs["write_tensorboard_experiment_data"] + @property def write_tensorboard_run_data( self, diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py index c0e9adda84..3a16d00295 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py @@ -582,6 +582,37 @@ def create_tensorboard_run( ) return self._stubs["create_tensorboard_run"] + @property + def batch_create_tensorboard_runs( + self, + ) -> Callable[ + [tensorboard_service.BatchCreateTensorboardRunsRequest], + Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse], + ]: + r"""Return a callable for the batch create tensorboard runs method over gRPC. + + Batch create TensorboardRuns. + + Returns: + Callable[[~.BatchCreateTensorboardRunsRequest], + Awaitable[~.BatchCreateTensorboardRunsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "batch_create_tensorboard_runs" not in self._stubs: + self._stubs[ + "batch_create_tensorboard_runs" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardRuns", + request_serializer=tensorboard_service.BatchCreateTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.BatchCreateTensorboardRunsResponse.deserialize, + ) + return self._stubs["batch_create_tensorboard_runs"] + @property def get_tensorboard_run( self, @@ -698,6 +729,39 @@ def delete_tensorboard_run( ) return self._stubs["delete_tensorboard_run"] + @property + def batch_create_tensorboard_time_series( + self, + ) -> Callable[ + [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], + Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse], + ]: + r"""Return a callable for the batch create tensorboard time + series method over gRPC. + + Batch create TensorboardTimeSeries that belong to a + TensorboardExperiment. + + Returns: + Callable[[~.BatchCreateTensorboardTimeSeriesRequest], + Awaitable[~.BatchCreateTensorboardTimeSeriesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "batch_create_tensorboard_time_series" not in self._stubs: + self._stubs[ + "batch_create_tensorboard_time_series" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardTimeSeries", + request_serializer=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.BatchCreateTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs["batch_create_tensorboard_time_series"] + @property def create_tensorboard_time_series( self, @@ -918,6 +982,40 @@ def read_tensorboard_blob_data( ) return self._stubs["read_tensorboard_blob_data"] + @property + def write_tensorboard_experiment_data( + self, + ) -> Callable[ + [tensorboard_service.WriteTensorboardExperimentDataRequest], + Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse], + ]: + r"""Return a callable for the write tensorboard experiment + data method over gRPC. + + Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardExperimentDataRequest], + Awaitable[~.WriteTensorboardExperimentDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "write_tensorboard_experiment_data" not in self._stubs: + self._stubs[ + "write_tensorboard_experiment_data" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardExperimentData", + request_serializer=tensorboard_service.WriteTensorboardExperimentDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardExperimentDataResponse.deserialize, + ) + return self._stubs["write_tensorboard_experiment_data"] + @property def write_tensorboard_run_data( self, diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 932c8be0b1..d2163f8502 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -380,6 +380,7 @@ ExplainResponse, PredictRequest, PredictResponse, + RawPredictRequest, ) from .specialist_pool import SpecialistPool from .specialist_pool_service import ( @@ -410,6 +411,10 @@ from .tensorboard_experiment import TensorboardExperiment from .tensorboard_run import TensorboardRun from .tensorboard_service import ( + BatchCreateTensorboardRunsRequest, + BatchCreateTensorboardRunsResponse, + BatchCreateTensorboardTimeSeriesRequest, + BatchCreateTensorboardTimeSeriesResponse, CreateTensorboardExperimentRequest, CreateTensorboardOperationMetadata, CreateTensorboardRequest, @@ -442,6 +447,8 @@ UpdateTensorboardRequest, UpdateTensorboardRunRequest, UpdateTensorboardTimeSeriesRequest, + WriteTensorboardExperimentDataRequest, + WriteTensorboardExperimentDataResponse, WriteTensorboardRunDataRequest, WriteTensorboardRunDataResponse, ) @@ -807,6 +814,7 @@ "ExplainResponse", "PredictRequest", "PredictResponse", + "RawPredictRequest", "SpecialistPool", "CreateSpecialistPoolOperationMetadata", "CreateSpecialistPoolRequest", @@ -829,6 +837,10 @@ "TimeSeriesDataPoint", "TensorboardExperiment", "TensorboardRun", + "BatchCreateTensorboardRunsRequest", + 
"BatchCreateTensorboardRunsResponse", + "BatchCreateTensorboardTimeSeriesRequest", + "BatchCreateTensorboardTimeSeriesResponse", "CreateTensorboardExperimentRequest", "CreateTensorboardOperationMetadata", "CreateTensorboardRequest", @@ -861,6 +873,8 @@ "UpdateTensorboardRequest", "UpdateTensorboardRunRequest", "UpdateTensorboardTimeSeriesRequest", + "WriteTensorboardExperimentDataRequest", + "WriteTensorboardExperimentDataResponse", "WriteTensorboardRunDataRequest", "WriteTensorboardRunDataResponse", "TensorboardTimeSeries", diff --git a/google/cloud/aiplatform_v1beta1/types/artifact.py b/google/cloud/aiplatform_v1beta1/types/artifact.py index 1cf5c15350..ce35a28b96 100644 --- a/google/cloud/aiplatform_v1beta1/types/artifact.py +++ b/google/cloud/aiplatform_v1beta1/types/artifact.py @@ -81,6 +81,7 @@ class Artifact(proto.Message): metadata store. metadata (google.protobuf.struct_pb2.Struct): Properties of the Artifact. + The size of this field should not exceed 200KB. description (str): Description of the Artifact """ diff --git a/google/cloud/aiplatform_v1beta1/types/context.py b/google/cloud/aiplatform_v1beta1/types/context.py index 412b6b462a..53ee6abeef 100644 --- a/google/cloud/aiplatform_v1beta1/types/context.py +++ b/google/cloud/aiplatform_v1beta1/types/context.py @@ -74,6 +74,7 @@ class Context(proto.Message): metadata store. metadata (google.protobuf.struct_pb2.Struct): Properties of the Context. + The size of this field should not exceed 200KB. description (str): Description of the Context """ diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index 4a810ce781..2a09c1b95f 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -87,11 +87,19 @@ class CustomJob(proto.Message): created by the CustomJob will be encrypted with the provided encryption key. 
web_access_uris (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob.WebAccessUrisEntry]): - Output only. The web access URIs for the - training job. The keys are the node names in the - training jobs, e.g. workerpool0-0. The values - are the URIs for each node's web portal in the - job. + Output only. URIs for accessing `interactive + shells `__ + (one URI for each training node). Only available if + [job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access] + is ``true``. + + The keys are names of each node in the training job; for + example, ``workerpool0-0`` for the primary node, + ``workerpool1-0`` for the first node in the second worker + pool, and ``workerpool1-1`` for the second node in the + second worker pool. + + The values are the URIs for each node's interactive shell. """ name = proto.Field(proto.STRING, number=1,) @@ -123,7 +131,7 @@ class CustomJobSpec(proto.Message): service_account (str): Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this - run-as account. If unspecified, the `AI Platform Custom Code + run-as account. If unspecified, the `Vertex AI Custom Code Service Agent `__ for the CustomJob's project is used. @@ -175,9 +183,18 @@ class CustomJobSpec(proto.Message): logs. Format: ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` enable_web_access (bool): - Optional. Vertex AI will enable web portal access to the - containers. The portals can be accessed on web via the URLs - given by [web_access_uris][]. + Optional. Whether you want Vertex AI to enable `interactive + shell + access `__ + to training containers. 
+ + If set to ``true``, you can access interactive shells at the + URIs given by + [CustomJob.web_access_uris][google.cloud.aiplatform.v1beta1.CustomJob.web_access_uris] + or + [Trial.web_access_uris][google.cloud.aiplatform.v1beta1.Trial.web_access_uris] + (within + [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials]). """ worker_pool_specs = proto.RepeatedField( diff --git a/google/cloud/aiplatform_v1beta1/types/dataset.py b/google/cloud/aiplatform_v1beta1/types/dataset.py index b43279a2a8..734a4a944a 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -37,6 +37,8 @@ class Dataset(proto.Message): Required. The user-defined name of the Dataset. The name can be up to 128 characters long and can be consist of any UTF-8 characters. + description (str): + Optional. The description of the Dataset. metadata_schema_uri (str): Required. Points to a YAML file stored on Google Cloud Storage describing additional @@ -86,6 +88,7 @@ class Dataset(proto.Message): name = proto.Field(proto.STRING, number=1,) display_name = proto.Field(proto.STRING, number=2,) + description = proto.Field(proto.STRING, number=16,) metadata_schema_uri = proto.Field(proto.STRING, number=3,) metadata = proto.Field(proto.MESSAGE, number=8, message=struct_pb2.Value,) create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint.py b/google/cloud/aiplatform_v1beta1/types/endpoint.py index a9f2a9e1f5..1067b935e6 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -96,6 +96,11 @@ class Endpoint(proto.Message): projects/{project}/global/networks/{network}. Where {project} is a project number, as in '12345', and {network} is network name. + model_deployment_monitoring_job (str): + Output only. 
Resource name of the Model Monitoring job + associated with this Endpoint if monitoring is enabled by + [CreateModelDeploymentMonitoringJob][]. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` """ name = proto.Field(proto.STRING, number=1,) @@ -113,6 +118,7 @@ class Endpoint(proto.Message): proto.MESSAGE, number=10, message=gca_encryption_spec.EncryptionSpec, ) network = proto.Field(proto.STRING, number=13,) + model_deployment_monitoring_job = proto.Field(proto.STRING, number=14,) class DeployedModel(proto.Message): @@ -126,7 +132,7 @@ class DeployedModel(proto.Message): degree of manual configuration. automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources): A description of resources that to large - degree are decided by AI Platform, and require + degree are decided by Vertex AI, and require only a modest additional configuration. id (str): Output only. The ID of the DeployedModel. diff --git a/google/cloud/aiplatform_v1beta1/types/execution.py b/google/cloud/aiplatform_v1beta1/types/execution.py index 1ac953399c..3bf745c3cb 100644 --- a/google/cloud/aiplatform_v1beta1/types/execution.py +++ b/google/cloud/aiplatform_v1beta1/types/execution.py @@ -77,6 +77,7 @@ class Execution(proto.Message): metadata store. metadata (google.protobuf.struct_pb2.Struct): Properties of the Execution. + The size of this field should not exceed 200KB. 
description (str): Description of the Execution """ diff --git a/google/cloud/aiplatform_v1beta1/types/explanation.py b/google/cloud/aiplatform_v1beta1/types/explanation.py index 8091a4b1d1..4c94178992 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation.py @@ -494,7 +494,7 @@ class ExplanationSpecOverride(proto.Message): r"""The [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] entries that can be overridden at [online - explanation][PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] + explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time. Attributes: diff --git a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py index 0c2a926618..95c8bda9f8 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py @@ -83,7 +83,7 @@ class InputMetadata(proto.Message): If no baseline is specified, Vertex AI chooses the baseline for this feature. If multiple baselines are specified, Vertex AI returns the average attributions across them in - [Attributions.baseline_attribution][]. + [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]. For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape of each baseline must match the shape of the input @@ -130,7 +130,7 @@ class InputMetadata(proto.Message): encoded_tensor_name (str): Encoded tensor is a transformation of the input tensor. 
Must be provided if choosing [Integrated Gradients - attribution][ExplanationParameters.integrated_gradients_attribution] + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution] or [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution] and the input tensor is not differentiable. @@ -151,15 +151,13 @@ class InputMetadata(proto.Message): the same group name will be treated as one feature when computing attributions. Features grouped together can have different shapes in value. If provided, there will be one - single attribution generated in [ - featureAttributions][Attribution.feature_attributions], + single attribution generated in + [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions], keyed by the group name. """ class Encoding(proto.Enum): - r"""Defines how the feature is encoded to [encoded_tensor][]. Defaults - to IDENTITY. - """ + r"""Defines how a feature is encoded. Defaults to IDENTITY.""" ENCODING_UNSPECIFIED = 0 IDENTITY = 1 BAG_OF_FEATURES = 2 @@ -206,8 +204,8 @@ class Visualization(proto.Message): Attributes: type_ (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Type): Type of the image visualization. Only applicable to - [Integrated Gradients attribution] - [ExplanationParameters.integrated_gradients_attribution]. + [Integrated Gradients + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution]. OUTLINES shows regions of attribution, while PIXELS shows per-pixel attribution. Defaults to OUTLINES. polarity (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Polarity): @@ -218,7 +216,7 @@ class Visualization(proto.Message): The color scheme used for the highlighted areas. 
Defaults to PINK_GREEN for [Integrated Gradients - attribution][ExplanationParameters.integrated_gradients_attribution], + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution], which shows positive attributions in green and negative in pink. @@ -246,8 +244,8 @@ class Visualization(proto.Message): class Type(proto.Enum): r"""Type of the image visualization. Only applicable to [Integrated - Gradients attribution] - [ExplanationParameters.integrated_gradients_attribution]. + Gradients + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution]. """ TYPE_UNSPECIFIED = 0 PIXELS = 1 @@ -362,8 +360,8 @@ class OutputMetadata(proto.Message): for a specific output. output_tensor_name (str): Name of the output tensor. Required and is - only applicable to AI Platform provided images - for Tensorflow. + only applicable to Vertex AI provided images for + Tensorflow. """ index_display_name_mapping = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore.py b/google/cloud/aiplatform_v1beta1/types/featurestore.py index 9a3e71157e..2fe67d1512 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore.py @@ -74,10 +74,10 @@ class OnlineServingConfig(proto.Message): Attributes: fixed_node_count (int): - Required. The number of nodes for each - cluster. The number of nodes will not scale - automatically but can be scaled manually by - providing different values when updating. + The number of nodes for each cluster. The + number of nodes will not scale automatically but + can be scaled manually by providing different + values when updating. 
""" fixed_node_count = proto.Field(proto.INT32, number=2,) diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py index c04ed7dcba..4b9fa9dea7 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py @@ -183,6 +183,22 @@ class DeployedIndex(proto.Message): The value sohuld be the name of the address (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) Example: 'vertex-ai-ip-range'. + deployment_group (str): + Optional. The deployment group can be no longer than 64 + characters (eg: 'test', 'prod'). If not set, we will use the + 'default' deployment group. + + Creating ``deployment_groups`` with ``reserved_ip_ranges`` + is a recommended practice when the peered network has + multiple peering ranges. This creates your deployments from + predictable IP spaces for easier traffic administration. + Also, one deployment_group (except 'default') can only be + used with the same reserved_ip_ranges which means if the + deployment_group has been used with reserved_ip_ranges: [a, + b, c], using it with [a, b] or [d, e] is disallowed. + + Note: we only support up to 5 deployment groups(not + including 'default'). 
""" id = proto.Field(proto.STRING, number=1,) @@ -203,6 +219,7 @@ class DeployedIndex(proto.Message): proto.MESSAGE, number=9, message="DeployedIndexAuthConfig", ) reserved_ip_ranges = proto.RepeatedField(proto.STRING, number=10,) + deployment_group = proto.Field(proto.STRING, number=11,) class DeployedIndexAuthConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/index_service.py b/google/cloud/aiplatform_v1beta1/types/index_service.py index c6b5837295..c7f19f6b31 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_service.py +++ b/google/cloud/aiplatform_v1beta1/types/index_service.py @@ -204,6 +204,8 @@ class NearestNeighborSearchOperationMetadata(proto.Message): Please note that, currently for those files that are broken or has unsupported file format, we will not have the stats for those files. + data_bytes_count (int): + The ingested data size in bytes. """ class RecordError(proto.Message): @@ -277,6 +279,7 @@ class ContentValidationStats(proto.Message): content_validation_stats = proto.RepeatedField( proto.MESSAGE, number=1, message=ContentValidationStats, ) + data_bytes_count = proto.Field(proto.INT64, number=2,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_service.py b/google/cloud/aiplatform_v1beta1/types/metadata_service.py index 5d6ff87acd..71da102b29 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_service.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_service.py @@ -202,10 +202,7 @@ class DeleteMetadataStoreRequest(proto.Message): MetadataStore to delete. Format: projects/{project}/locations/{location}/metadataStores/{metadatastore} force (bool): - If set to true, any child resources of this MetadataStore - will be deleted. (Otherwise, the request will fail with a - FAILED_PRECONDITION error if the MetadataStore has any child - resources.) + Deprecated: Field is no longer supported. 
""" name = proto.Field(proto.STRING, number=1,) @@ -364,13 +361,14 @@ class UpdateArtifactRequest(proto.Message): projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A FieldMask indicating which fields - should be updated. + should be updated. Functionality of this field + is not yet supported. allow_missing (bool): If set to true, and the [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is not found, a new - [Artifact][google.cloud.aiplatform.v1beta1.Artifact] will be - created. In this situation, ``update_mask`` is ignored. + [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is + created. """ artifact = proto.Field(proto.MESSAGE, number=1, message=gca_artifact.Artifact,) @@ -598,13 +596,14 @@ class UpdateContextRequest(proto.Message): projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A FieldMask indicating which fields - should be updated. + should be updated. Functionality of this field + is not yet supported. allow_missing (bool): If set to true, and the [Context][google.cloud.aiplatform.v1beta1.Context] is not found, a new - [Context][google.cloud.aiplatform.v1beta1.Context] will be - created. In this situation, ``update_mask`` is ignored. + [Context][google.cloud.aiplatform.v1beta1.Context] is + created. """ context = proto.Field(proto.MESSAGE, number=1, message=gca_context.Context,) @@ -907,13 +906,14 @@ class UpdateExecutionRequest(proto.Message): projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A FieldMask indicating which fields - should be updated. + should be updated. Functionality of this field + is not yet supported. 
allow_missing (bool): If set to true, and the [Execution][google.cloud.aiplatform.v1beta1.Execution] is not found, a new - [Execution][google.cloud.aiplatform.v1beta1.Execution] will - be created. In this situation, ``update_mask`` is ignored. + [Execution][google.cloud.aiplatform.v1beta1.Execution] is + created. """ execution = proto.Field(proto.MESSAGE, number=1, message=gca_execution.Execution,) diff --git a/google/cloud/aiplatform_v1beta1/types/migration_service.py b/google/cloud/aiplatform_v1beta1/types/migration_service.py index 4219a6a329..05fbb5f34e 100644 --- a/google/cloud/aiplatform_v1beta1/types/migration_service.py +++ b/google/cloud/aiplatform_v1beta1/types/migration_service.py @@ -212,7 +212,7 @@ class MigrateAutomlDatasetConfig(proto.Message): class MigrateDataLabelingDatasetConfig(proto.Message): r"""Config for migrating Dataset in datalabeling.googleapis.com - to AI Platform's Dataset. + to Vertex AI's Dataset. Attributes: dataset (str): diff --git a/google/cloud/aiplatform_v1beta1/types/model.py b/google/cloud/aiplatform_v1beta1/types/model.py index 1742047247..b5566b0f4e 100644 --- a/google/cloud/aiplatform_v1beta1/types/model.py +++ b/google/cloud/aiplatform_v1beta1/types/model.py @@ -405,8 +405,8 @@ class ModelContainerSpec(proto.Message): identify an image in Artifact Registry or Container Registry. Learn more about the `container publishing requirements `__, - including permissions requirements for the AI Platform - Service Agent. + including permissions requirements for the Vertex AI Service + Agent. The container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], @@ -538,8 +538,8 @@ class ModelContainerSpec(proto.Message): ports (Sequence[google.cloud.aiplatform_v1beta1.types.Port]): Immutable. List of ports to expose from the container. Vertex AI sends any prediction requests that it receives to - the first port on this list. 
AI Platform also sends - `liveness and health + the first port on this list. Vertex AI also sends `liveness + and health checks `__ to this port. diff --git a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py index e557b7109f..2774fb9b46 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -15,6 +15,7 @@ # import proto # type: ignore +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import job_state @@ -22,6 +23,7 @@ from google.protobuf import duration_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore __protobuf__ = proto.module( @@ -153,6 +155,15 @@ class ModelDeploymentMonitoringJob(proto.Message): round. stats_anomalies_base_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): Stats anomalies base folder path. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + ModelDeploymentMonitoringJob. If set, this + ModelDeploymentMonitoringJob and all sub- + resources of this ModelDeploymentMonitoringJob + will be secured by this key. + error (google.rpc.status_pb2.Status): + Output only. Only populated when the job's state is + ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. 
""" class MonitoringScheduleState(proto.Enum): @@ -201,6 +212,10 @@ class MonitoringScheduleState(proto.Enum): stats_anomalies_base_directory = proto.Field( proto.MESSAGE, number=20, message=io.GcsDestination, ) + encryption_spec = proto.Field( + proto.MESSAGE, number=21, message=gca_encryption_spec.EncryptionSpec, + ) + error = proto.Field(proto.MESSAGE, number=23, message=status_pb2.Status,) class ModelDeploymentMonitoringBigQueryTable(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py index 31792d0f9a..fba58287f6 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -83,35 +83,21 @@ class ListTrainingPipelinesRequest(proto.Message): TrainingPipelines from. Format: ``projects/{project}/locations/{location}`` filter (str): - Lists the PipelineJobs that match the filter expression. The - following fields are supported: + The standard list filter. Supported fields: - - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. + - ``display_name`` supports = and !=. - Filter expressions can be combined together using logical - operators (``AND`` & ``OR``). For example: - ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. + - ``state`` supports = and !=. - The syntax to define filter expression is based on - https://google.aip.dev/160. 
+ Some examples of using the filter are: - Examples: + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` - PipelineJobs created or updated after 2020-05-18 00:00:00 - UTC. - - ``labels.env = "prod"`` PipelineJobs with label "env" set - to "prod". + - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` + + - ``NOT display_name="my_pipeline"`` + + - ``state="PIPELINE_STATE_FAILED"`` page_size (int): The standard list page size. page_token (str): @@ -233,18 +219,35 @@ class ListPipelineJobsRequest(proto.Message): PipelineJobs from. Format: ``projects/{project}/locations/{location}`` filter (str): - The standard list filter. Supported fields: + Lists the PipelineJobs that match the filter expression. The + following fields are supported: - - ``display_name`` supports ``=`` and ``!=``. - - ``state`` supports ``=`` and ``!=``. + - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``labels``: Supports key-value equality and key presence. - The following examples demonstrate how to filter the list of - PipelineJobs: + Filter expressions can be combined together using logical + operators (``AND`` & ``OR``). For example: + ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. 
- - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` - - ``NOT display_name="my_pipeline"`` - - ``state="PIPELINE_STATE_FAILED"`` + The syntax to define filter expression is based on + https://google.aip.dev/160. + + Examples: + + - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` + PipelineJobs created or updated after 2020-05-18 00:00:00 + UTC. + - ``labels.env = "prod"`` PipelineJobs with label "env" set + to "prod". page_size (int): The standard list page size. page_token (str): diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index 669b28a66a..e23b49aa93 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -15,6 +15,7 @@ # import proto # type: ignore +from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1beta1.types import explanation from google.protobuf import struct_pb2 # type: ignore @@ -24,6 +25,7 @@ manifest={ "PredictRequest", "PredictResponse", + "RawPredictRequest", "ExplainRequest", "ExplainResponse", }, @@ -88,6 +90,42 @@ class PredictResponse(proto.Message): deployed_model_id = proto.Field(proto.STRING, number=2,) +class RawPredictRequest(proto.Message): + r"""Request message for + [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + http_body (google.api.httpbody_pb2.HttpBody): + The prediction input. Supports HTTP headers and arbitrary + data payload. + + A + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + may have an upper limit on the number of instances it + supports per request. 
When this limit is exceeded for an + AutoML model, the + [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] + method returns an error. When this limit is exceeded for a + custom-trained model, the behavior varies depending on the + model. + + You can specify the schema for each instance in the + [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + field when you create a + [Model][google.cloud.aiplatform.v1beta1.Model]. This schema + applies when you deploy the ``Model`` as a ``DeployedModel`` + to an [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] + and use the ``RawPredict`` method. + """ + + endpoint = proto.Field(proto.STRING, number=1,) + http_body = proto.Field(proto.MESSAGE, number=2, message=httpbody_pb2.HttpBody,) + + class ExplainRequest(proto.Message): r"""Request message for [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. diff --git a/google/cloud/aiplatform_v1beta1/types/specialist_pool.py b/google/cloud/aiplatform_v1beta1/types/specialist_pool.py index 0e7fe971e9..7a1ca8f240 100644 --- a/google/cloud/aiplatform_v1beta1/types/specialist_pool.py +++ b/google/cloud/aiplatform_v1beta1/types/specialist_pool.py @@ -24,12 +24,11 @@ class SpecialistPool(proto.Message): r"""SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist - managers who are responsible for managing the labelers in this - pool as well as customers' data labeling jobs associated with - this pool. - Customers create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the jobs using - CrowdCompute console. + managers and workers. Managers are responsible for managing the + workers in this pool as well as customers' data labeling jobs + associated with this pool.
Customers create specialist pool as + well as start data labeling jobs on Cloud, managers and workers + handle the jobs using CrowdCompute console. Attributes: name (str): @@ -42,10 +41,10 @@ class SpecialistPool(proto.Message): characters. This field should be unique on project-level. specialist_managers_count (int): - Output only. The number of Specialists in - this SpecialistPool. + Output only. The number of managers in this + SpecialistPool. specialist_manager_emails (Sequence[str]): - The email addresses of the specialists in the + The email addresses of the managers in the SpecialistPool. pending_data_labeling_jobs (Sequence[str]): Output only. The resource name of the pending diff --git a/google/cloud/aiplatform_v1beta1/types/study.py b/google/cloud/aiplatform_v1beta1/types/study.py index f43eae65f2..b3d763ccdf 100644 --- a/google/cloud/aiplatform_v1beta1/types/study.py +++ b/google/cloud/aiplatform_v1beta1/types/study.py @@ -112,11 +112,22 @@ class Trial(proto.Message): Trial. It's set for a HyperparameterTuningJob's Trial. web_access_uris (Sequence[google.cloud.aiplatform_v1beta1.types.Trial.WebAccessUrisEntry]): - Output only. The web access URIs for the - training job. The keys are the node names in the - training jobs, e.g. workerpool0-0. The values - are the URIs for each node's web portal in the - job. + Output only. URIs for accessing `interactive + shells `__ + (one URI for each training node). Only available if this + trial is part of a + [HyperparameterTuningJob][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob] + and the job's + [trial_job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access] + field is ``true``. + + The keys are names of each node used for the trial; for + example, ``workerpool0-0`` for the primary node, + ``workerpool1-0`` for the first node in the second worker + pool, and ``workerpool1-1`` for the second node in the + second worker pool. 
+ + The values are the URIs for each node's interactive shell. """ class State(proto.Enum): @@ -170,6 +181,7 @@ class StudySpec(proto.Message): The automated early stopping spec using median rule. convex_stop_config (google.cloud.aiplatform_v1beta1.types.StudySpec.ConvexStopConfig): + Deprecated. The automated early stopping using convex stopping rule. metrics (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.MetricSpec]): diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py index 030d8de1cf..1b6e250cc4 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py @@ -46,7 +46,27 @@ class TensorboardRun(proto.Message): Output only. Timestamp when this TensorboardRun was last updated. labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun.LabelsEntry]): + The labels with user-defined metadata to organize your + TensorboardRuns. + This field will be used to filter and visualize Runs in the + Tensorboard UI. For example, a Vertex AI training job can + set a label aiplatform.googleapis.com/training_job_id=xxxxx + to all the runs created within that job. An end user can set + a label experiment_id=xxxxx for all the runs produced in a + Jupyter notebook. These runs can be grouped by a label value + and visualized together in the Tensorboard UI. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one TensorboardRun (System labels are + excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. etag (str): Used to perform a consistent read-modify- rite updates. 
If not set, a blind "overwrite" diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py index 0691fd97d3..a4f979f320 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py @@ -43,6 +43,8 @@ "ListTensorboardExperimentsResponse", "UpdateTensorboardExperimentRequest", "DeleteTensorboardExperimentRequest", + "BatchCreateTensorboardRunsRequest", + "BatchCreateTensorboardRunsResponse", "CreateTensorboardRunRequest", "GetTensorboardRunRequest", "ReadTensorboardBlobDataRequest", @@ -51,6 +53,8 @@ "ListTensorboardRunsResponse", "UpdateTensorboardRunRequest", "DeleteTensorboardRunRequest", + "BatchCreateTensorboardTimeSeriesRequest", + "BatchCreateTensorboardTimeSeriesResponse", "CreateTensorboardTimeSeriesRequest", "GetTensorboardTimeSeriesRequest", "ListTensorboardTimeSeriesRequest", @@ -59,6 +63,8 @@ "DeleteTensorboardTimeSeriesRequest", "ReadTensorboardTimeSeriesDataRequest", "ReadTensorboardTimeSeriesDataResponse", + "WriteTensorboardExperimentDataRequest", + "WriteTensorboardExperimentDataResponse", "WriteTensorboardRunDataRequest", "WriteTensorboardRunDataResponse", "ExportTensorboardTimeSeriesDataRequest", @@ -107,9 +113,9 @@ class ListTensorboardsRequest(proto.Message): Attributes: parent (str): - Required. The resource name of the Location - to list Tensorboards. Format: - 'projects/{project}/locations/{location}' + Required. The resource name of the Location to list + Tensorboards. Format: + ``projects/{project}/locations/{location}`` filter (str): Lists the Tensorboards that match the filter expression. 
@@ -360,14 +366,51 @@ class DeleteTensorboardExperimentRequest(proto.Message): name = proto.Field(proto.STRING, number=1,) +class BatchCreateTensorboardRunsRequest(proto.Message): + r"""Request message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardExperiment to + create the TensorboardRuns in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The parent field in the CreateTensorboardRunRequest messages + must match this field. + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]): + Required. The request message specifying the + TensorboardRuns to create. A maximum of 1000 + TensorboardRuns can be created in a batch. + """ + + parent = proto.Field(proto.STRING, number=1,) + requests = proto.RepeatedField( + proto.MESSAGE, number=2, message="CreateTensorboardRunRequest", + ) + + +class BatchCreateTensorboardRunsResponse(proto.Message): + r"""Response message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + + Attributes: + tensorboard_runs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun]): + The created TensorboardRuns. + """ + + tensorboard_runs = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_tensorboard_run.TensorboardRun, + ) + + class CreateTensorboardRunRequest(proto.Message): r"""Request message for [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. Attributes: parent (str): - Required. The resource name of the Tensorboard to create the - TensorboardRun in. Format: + Required. The resource name of the TensorboardExperiment to + create the TensorboardRun in. 
Format: ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): Required. The TensorboardRun to create. @@ -438,7 +481,8 @@ class ListTensorboardRunsRequest(proto.Message): Attributes: parent (str): Required. The resource name of the - Tensorboard to list TensorboardRuns. Format: + TensorboardExperiment to list TensorboardRuns. + Format: 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' filter (str): Lists the TensorboardRuns that match the @@ -537,6 +581,47 @@ class DeleteTensorboardRunRequest(proto.Message): name = proto.Field(proto.STRING, number=1,) +class BatchCreateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardExperiment to + create the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The TensorboardRuns referenced by the parent fields in the + CreateTensorboardTimeSeriesRequest messages must be sub + resources of this TensorboardExperiment. + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]): + Required. The request message specifying the + TensorboardTimeSeries to create. A maximum of + 1000 TensorboardTimeSeries can be created in a + batch. 
+ """ + + parent = proto.Field(proto.STRING, number=1,) + requests = proto.RepeatedField( + proto.MESSAGE, number=2, message="CreateTensorboardTimeSeriesRequest", + ) + + +class BatchCreateTensorboardTimeSeriesResponse(proto.Message): + r"""Response message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + Attributes: + tensorboard_time_series (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries]): + The created TensorboardTimeSeries. + """ + + tensorboard_time_series = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + class CreateTensorboardTimeSeriesRequest(proto.Message): r"""Request message for [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. @@ -731,6 +816,32 @@ class ReadTensorboardTimeSeriesDataResponse(proto.Message): ) +class WriteTensorboardExperimentDataRequest(proto.Message): + r"""Request message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + + Attributes: + tensorboard_experiment (str): + Required. The resource name of the TensorboardExperiment to + write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + write_run_data_requests (Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]): + Required. Requests containing per-run + TensorboardTimeSeries data to write. 
+ """ + + tensorboard_experiment = proto.Field(proto.STRING, number=1,) + write_run_data_requests = proto.RepeatedField( + proto.MESSAGE, number=2, message="WriteTensorboardRunDataRequest", + ) + + +class WriteTensorboardExperimentDataResponse(proto.Message): + r"""Response message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + """ + + class WriteTensorboardRunDataRequest(proto.Message): r"""Request message for [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py index 0d91446271..58daf62854 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py @@ -740,6 +740,7 @@ def test_get_dataset( call.return_value = dataset.Dataset( name="name_value", display_name="display_name_value", + description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", ) @@ -754,6 +755,7 @@ def test_get_dataset( assert isinstance(response, dataset.Dataset) assert response.name == "name_value" assert response.display_name == "display_name_value" + assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" @@ -796,6 +798,7 @@ async def test_get_dataset_async( dataset.Dataset( name="name_value", display_name="display_name_value", + description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", ) @@ -811,6 +814,7 @@ async def test_get_dataset_async( assert isinstance(response, dataset.Dataset) assert response.name == "name_value" assert response.display_name == "display_name_value" + assert response.description == "description_value" assert 
response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" @@ -954,6 +958,7 @@ def test_update_dataset( call.return_value = gca_dataset.Dataset( name="name_value", display_name="display_name_value", + description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", ) @@ -968,6 +973,7 @@ def test_update_dataset( assert isinstance(response, gca_dataset.Dataset) assert response.name == "name_value" assert response.display_name == "display_name_value" + assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" @@ -1010,6 +1016,7 @@ async def test_update_dataset_async( gca_dataset.Dataset( name="name_value", display_name="display_name_value", + description="description_value", metadata_schema_uri="metadata_schema_uri_value", etag="etag_value", ) @@ -1025,6 +1032,7 @@ async def test_update_dataset_async( assert isinstance(response, gca_dataset.Dataset) assert response.name == "name_value" assert response.display_name == "display_name_value" + assert response.description == "description_value" assert response.metadata_schema_uri == "metadata_schema_uri_value" assert response.etag == "etag_value" diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py index 4fdc429249..9314f3b1d7 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py @@ -753,6 +753,7 @@ def test_get_endpoint( description="description_value", etag="etag_value", network="network_value", + model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) response = client.get_endpoint(request) @@ -768,6 +769,10 @@ def test_get_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + 
assert ( + response.model_deployment_monitoring_job + == "model_deployment_monitoring_job_value" + ) def test_get_endpoint_from_dict(): @@ -811,6 +816,7 @@ async def test_get_endpoint_async( description="description_value", etag="etag_value", network="network_value", + model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) ) response = await client.get_endpoint(request) @@ -827,6 +833,10 @@ async def test_get_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert ( + response.model_deployment_monitoring_job + == "model_deployment_monitoring_job_value" + ) @pytest.mark.asyncio @@ -1327,6 +1337,7 @@ def test_update_endpoint( description="description_value", etag="etag_value", network="network_value", + model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) response = client.update_endpoint(request) @@ -1342,6 +1353,10 @@ def test_update_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert ( + response.model_deployment_monitoring_job + == "model_deployment_monitoring_job_value" + ) def test_update_endpoint_from_dict(): @@ -1385,6 +1400,7 @@ async def test_update_endpoint_async( description="description_value", etag="etag_value", network="network_value", + model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) ) response = await client.update_endpoint(request) @@ -1401,6 +1417,10 @@ async def test_update_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert ( + response.model_deployment_monitoring_job + == "model_deployment_monitoring_job_value" + ) @pytest.mark.asyncio @@ -2748,9 +2768,37 @@ def test_parse_model_path(): assert expected == actual -def test_network_path(): +def 
test_model_deployment_monitoring_job_path(): project = "squid" - network = "clam" + location = "clam" + model_deployment_monitoring_job = "whelk" + expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format( + project=project, + location=location, + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + actual = EndpointServiceClient.model_deployment_monitoring_job_path( + project, location, model_deployment_monitoring_job + ) + assert expected == actual + + +def test_parse_model_deployment_monitoring_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model_deployment_monitoring_job": "nudibranch", + } + path = EndpointServiceClient.model_deployment_monitoring_job_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_model_deployment_monitoring_job_path(path) + assert expected == actual + + +def test_network_path(): + project = "cuttlefish" + network = "mussel" expected = "projects/{project}/global/networks/{network}".format( project=project, network=network, ) @@ -2760,8 +2808,8 @@ def test_network_path(): def test_parse_network_path(): expected = { - "project": "whelk", - "network": "octopus", + "project": "winkle", + "network": "nautilus", } path = EndpointServiceClient.network_path(**expected) @@ -2771,7 +2819,7 @@ def test_parse_network_path(): def test_common_billing_account_path(): - billing_account = "oyster" + billing_account = "scallop" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -2781,7 +2829,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "nudibranch", + "billing_account": "abalone", } path = EndpointServiceClient.common_billing_account_path(**expected) @@ -2791,7 +2839,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = 
"cuttlefish" + folder = "squid" expected = "folders/{folder}".format(folder=folder,) actual = EndpointServiceClient.common_folder_path(folder) assert expected == actual @@ -2799,7 +2847,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "mussel", + "folder": "clam", } path = EndpointServiceClient.common_folder_path(**expected) @@ -2809,7 +2857,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "winkle" + organization = "whelk" expected = "organizations/{organization}".format(organization=organization,) actual = EndpointServiceClient.common_organization_path(organization) assert expected == actual @@ -2817,7 +2865,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "nautilus", + "organization": "octopus", } path = EndpointServiceClient.common_organization_path(**expected) @@ -2827,7 +2875,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "scallop" + project = "oyster" expected = "projects/{project}".format(project=project,) actual = EndpointServiceClient.common_project_path(project) assert expected == actual @@ -2835,7 +2883,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "abalone", + "project": "nudibranch", } path = EndpointServiceClient.common_project_path(**expected) @@ -2845,8 +2893,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "squid" - location = "clam" + project = "cuttlefish" + location = "mussel" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -2856,8 +2904,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "whelk", - "location": "octopus", + "project": "winkle", + "location": "nautilus", } path = EndpointServiceClient.common_location_path(**expected) diff --git 
a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index 1dfa2f3e71..475c192c60 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -1683,18 +1683,20 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", - "dataset": "nautilus", + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", } path = MigrationServiceClient.dataset_path(**expected) @@ -1704,9 +1706,9 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "scallop" - location = "abalone" - dataset = "squid" + project = "squid" + location = "clam" + dataset = "whelk" expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @@ -1716,9 +1718,9 @@ def test_dataset_path(): def test_parse_dataset_path(): expected = { - "project": "clam", - "location": "whelk", - "dataset": "octopus", + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", } path = MigrationServiceClient.dataset_path(**expected) @@ -1728,20 +1730,18 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, 
location=location, dataset=dataset, + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "mussel", - "location": "winkle", + "project": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py index 559531c0e1..fda9da087a 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py @@ -24,6 +24,7 @@ from proto.marshal.rules.dates import DurationRule, TimestampRule +from google.api import httpbody_pb2 # type: ignore from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 @@ -44,6 +45,7 @@ from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import prediction_service from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore import google.auth @@ -679,6 +681,228 @@ async def test_predict_flattened_error_async(): ) +def test_raw_predict( + transport: str = "grpc", request_type=prediction_service.RawPredictRequest +): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.raw_predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = httpbody_pb2.HttpBody( + content_type="content_type_value", data=b"data_blob", + ) + response = client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.RawPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, httpbody_pb2.HttpBody) + assert response.content_type == "content_type_value" + assert response.data == b"data_blob" + + +def test_raw_predict_from_dict(): + test_raw_predict(request_type=dict) + + +def test_raw_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.raw_predict), "__call__") as call: + client.raw_predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.RawPredictRequest() + + +@pytest.mark.asyncio +async def test_raw_predict_async( + transport: str = "grpc_asyncio", request_type=prediction_service.RawPredictRequest +): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.raw_predict), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + httpbody_pb2.HttpBody(content_type="content_type_value", data=b"data_blob",) + ) + response = await client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.RawPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, httpbody_pb2.HttpBody) + assert response.content_type == "content_type_value" + assert response.data == b"data_blob" + + +@pytest.mark.asyncio +async def test_raw_predict_async_from_dict(): + await test_raw_predict_async(request_type=dict) + + +def test_raw_predict_field_headers(): + client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.RawPredictRequest() + + request.endpoint = "endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.raw_predict), "__call__") as call: + call.return_value = httpbody_pb2.HttpBody() + client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_raw_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = prediction_service.RawPredictRequest() + + request.endpoint = "endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.raw_predict), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + httpbody_pb2.HttpBody() + ) + await client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"] + + +def test_raw_predict_flattened(): + client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.raw_predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = httpbody_pb2.HttpBody() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.raw_predict( + endpoint="endpoint_value", + http_body=httpbody_pb2.HttpBody(content_type="content_type_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].endpoint == "endpoint_value" + assert args[0].http_body == httpbody_pb2.HttpBody( + content_type="content_type_value" + ) + + +def test_raw_predict_flattened_error(): + client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.raw_predict( + prediction_service.RawPredictRequest(), + endpoint="endpoint_value", + http_body=httpbody_pb2.HttpBody(content_type="content_type_value"), + ) + + +@pytest.mark.asyncio +async def test_raw_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.raw_predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = httpbody_pb2.HttpBody() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + httpbody_pb2.HttpBody() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.raw_predict( + endpoint="endpoint_value", + http_body=httpbody_pb2.HttpBody(content_type="content_type_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].endpoint == "endpoint_value" + assert args[0].http_body == httpbody_pb2.HttpBody( + content_type="content_type_value" + ) + + +@pytest.mark.asyncio +async def test_raw_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.raw_predict( + prediction_service.RawPredictRequest(), + endpoint="endpoint_value", + http_body=httpbody_pb2.HttpBody(content_type="content_type_value"), + ) + + def test_explain( transport: str = "grpc", request_type=prediction_service.ExplainRequest ): @@ -948,6 +1172,7 @@ def test_prediction_service_base_transport(): # raise NotImplementedError. 
methods = ( "predict", + "raw_predict", "explain", ) for method in methods: diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py index 15caab1a47..2c71391c98 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py @@ -3542,6 +3542,252 @@ async def test_create_tensorboard_run_flattened_error_async(): ) +def test_batch_create_tensorboard_runs( + transport: str = "grpc", + request_type=tensorboard_service.BatchCreateTensorboardRunsRequest, +): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() + response = client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse) + + +def test_batch_create_tensorboard_runs_from_dict(): + test_batch_create_tensorboard_runs(request_type=dict) + + +def test_batch_create_tensorboard_runs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), "__call__" + ) as call: + client.batch_create_tensorboard_runs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.BatchCreateTensorboardRunsRequest, +): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.BatchCreateTensorboardRunsResponse() + ) + response = await client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse) + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_async_from_dict(): + await test_batch_create_tensorboard_runs_async(request_type=dict) + + +def test_batch_create_tensorboard_runs_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchCreateTensorboardRunsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), "__call__" + ) as call: + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() + client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchCreateTensorboardRunsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.BatchCreateTensorboardRunsResponse() + ) + await client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_batch_create_tensorboard_runs_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_create_tensorboard_runs( + parent="parent_value", + requests=[ + tensorboard_service.CreateTensorboardRunRequest(parent="parent_value") + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].requests == [ + tensorboard_service.CreateTensorboardRunRequest(parent="parent_value") + ] + + +def test_batch_create_tensorboard_runs_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.batch_create_tensorboard_runs( + tensorboard_service.BatchCreateTensorboardRunsRequest(), + parent="parent_value", + requests=[ + tensorboard_service.CreateTensorboardRunRequest(parent="parent_value") + ], + ) + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.BatchCreateTensorboardRunsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_create_tensorboard_runs( + parent="parent_value", + requests=[ + tensorboard_service.CreateTensorboardRunRequest(parent="parent_value") + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].requests == [ + tensorboard_service.CreateTensorboardRunRequest(parent="parent_value") + ] + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.batch_create_tensorboard_runs( + tensorboard_service.BatchCreateTensorboardRunsRequest(), + parent="parent_value", + requests=[ + tensorboard_service.CreateTensorboardRunRequest(parent="parent_value") + ], + ) + + def test_get_tensorboard_run( transport: str = "grpc", request_type=tensorboard_service.GetTensorboardRunRequest ): @@ -4527,27 +4773,257 @@ async def test_delete_tensorboard_run_async( @pytest.mark.asyncio -async def test_delete_tensorboard_run_async_from_dict(): - await test_delete_tensorboard_run_async(request_type=dict) +async def test_delete_tensorboard_run_async_from_dict(): + await test_delete_tensorboard_run_async(request_type=dict) + + +def test_delete_tensorboard_run_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRunRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.DeleteTensorboardRunRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_tensorboard_run_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard_run(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_delete_tensorboard_run_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_tensorboard_run( + tensorboard_service.DeleteTensorboardRunRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard_run(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_tensorboard_run( + tensorboard_service.DeleteTensorboardRunRequest(), name="name_value", + ) + + +def test_batch_create_tensorboard_time_series( + transport: str = "grpc", + request_type=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, +): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + ) + response = client.batch_create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance( + response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse + ) + + +def test_batch_create_tensorboard_time_series_from_dict(): + test_batch_create_tensorboard_time_series(request_type=dict) + + +def test_batch_create_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), "__call__" + ) as call: + client.batch_create_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, +): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + ) + response = await client.batch_create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance( + response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse + ) + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_async_from_dict(): + await test_batch_create_tensorboard_time_series_async(request_type=dict) -def test_delete_tensorboard_run_field_headers(): +def test_batch_create_tensorboard_time_series_field_headers(): client = TensorboardServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRunRequest() + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - request.name = "name/value" + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.delete_tensorboard_run), "__call__" + type(client.transport.batch_create_tensorboard_time_series), "__call__" ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.delete_tensorboard_run(request) + call.return_value = ( + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + ) + client.batch_create_tensorboard_time_series(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -4556,29 +5032,29 @@ def test_delete_tensorboard_run_field_headers(): # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] @pytest.mark.asyncio -async def test_delete_tensorboard_run_field_headers_async(): +async def test_batch_create_tensorboard_time_series_field_headers_async(): client = TensorboardServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRunRequest() + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - request.name = "name/value" + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.delete_tensorboard_run), "__call__" + type(client.transport.batch_create_tensorboard_time_series), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() ) - await client.delete_tensorboard_run(request) + await client.batch_create_tensorboard_time_series(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -4587,32 +5063,46 @@ async def test_delete_tensorboard_run_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] -def test_delete_tensorboard_run_flattened(): +def test_batch_create_tensorboard_time_series_flattened(): client = TensorboardServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - type(client.transport.delete_tensorboard_run), "__call__" + type(client.transport.batch_create_tensorboard_time_series), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") + call.return_value = ( + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.delete_tensorboard_run(name="name_value",) + client.batch_create_tensorboard_time_series( + parent="parent_value", + requests=[ + tensorboard_service.CreateTensorboardTimeSeriesRequest( + parent="parent_value" + ) + ], + ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + assert args[0].parent == "parent_value" + assert args[0].requests == [ + tensorboard_service.CreateTensorboardTimeSeriesRequest( + parent="parent_value" + ) + ] -def test_delete_tensorboard_run_flattened_error(): +def test_batch_create_tensorboard_time_series_flattened_error(): client = TensorboardServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4620,40 +5110,60 @@ def test_delete_tensorboard_run_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.delete_tensorboard_run( - tensorboard_service.DeleteTensorboardRunRequest(), name="name_value", + client.batch_create_tensorboard_time_series( + tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(), + parent="parent_value", + requests=[ + tensorboard_service.CreateTensorboardTimeSeriesRequest( + parent="parent_value" + ) + ], ) @pytest.mark.asyncio -async def test_delete_tensorboard_run_flattened_async(): +async def test_batch_create_tensorboard_time_series_flattened_async(): client = TensorboardServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.delete_tensorboard_run), "__call__" + type(client.transport.batch_create_tensorboard_time_series), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") + call.return_value = ( + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + ) call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_tensorboard_run(name="name_value",) + response = await client.batch_create_tensorboard_time_series( + parent="parent_value", + requests=[ + tensorboard_service.CreateTensorboardTimeSeriesRequest( + parent="parent_value" + ) + ], + ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + assert args[0].parent == "parent_value" + assert args[0].requests == [ + tensorboard_service.CreateTensorboardTimeSeriesRequest( + parent="parent_value" + ) + ] @pytest.mark.asyncio -async def test_delete_tensorboard_run_flattened_error_async(): +async def test_batch_create_tensorboard_time_series_flattened_error_async(): client = TensorboardServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4661,8 +5171,14 @@ async def test_delete_tensorboard_run_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.delete_tensorboard_run( - tensorboard_service.DeleteTensorboardRunRequest(), name="name_value", + await client.batch_create_tensorboard_time_series( + tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(), + parent="parent_value", + requests=[ + tensorboard_service.CreateTensorboardTimeSeriesRequest( + parent="parent_value" + ) + ], ) @@ -6618,6 +7134,274 @@ async def test_read_tensorboard_blob_data_flattened_error_async(): ) +def test_write_tensorboard_experiment_data( + transport: str = "grpc", + request_type=tensorboard_service.WriteTensorboardExperimentDataRequest, +): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() + response = client.write_tensorboard_experiment_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance( + response, tensorboard_service.WriteTensorboardExperimentDataResponse + ) + + +def test_write_tensorboard_experiment_data_from_dict(): + test_write_tensorboard_experiment_data(request_type=dict) + + +def test_write_tensorboard_experiment_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), "__call__" + ) as call: + client.write_tensorboard_experiment_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() + + +@pytest.mark.asyncio +async def test_write_tensorboard_experiment_data_async( + transport: str = "grpc_asyncio", + request_type=tensorboard_service.WriteTensorboardExperimentDataRequest, +): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.WriteTensorboardExperimentDataResponse() + ) + response = await client.write_tensorboard_experiment_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance( + response, tensorboard_service.WriteTensorboardExperimentDataResponse + ) + + +@pytest.mark.asyncio +async def test_write_tensorboard_experiment_data_async_from_dict(): + await test_write_tensorboard_experiment_data_async(request_type=dict) + + +def test_write_tensorboard_experiment_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.WriteTensorboardExperimentDataRequest() + + request.tensorboard_experiment = "tensorboard_experiment/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), "__call__" + ) as call: + call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() + client.write_tensorboard_experiment_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_experiment=tensorboard_experiment/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_write_tensorboard_experiment_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.WriteTensorboardExperimentDataRequest() + + request.tensorboard_experiment = "tensorboard_experiment/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.WriteTensorboardExperimentDataResponse() + ) + await client.write_tensorboard_experiment_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "tensorboard_experiment=tensorboard_experiment/value", + ) in kw["metadata"] + + +def test_write_tensorboard_experiment_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.write_tensorboard_experiment_data( + tensorboard_experiment="tensorboard_experiment_value", + write_run_data_requests=[ + tensorboard_service.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value" + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].tensorboard_experiment == "tensorboard_experiment_value" + assert args[0].write_run_data_requests == [ + tensorboard_service.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value" + ) + ] + + +def test_write_tensorboard_experiment_data_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.write_tensorboard_experiment_data( + tensorboard_service.WriteTensorboardExperimentDataRequest(), + tensorboard_experiment="tensorboard_experiment_value", + write_run_data_requests=[ + tensorboard_service.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value" + ) + ], + ) + + +@pytest.mark.asyncio +async def test_write_tensorboard_experiment_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tensorboard_service.WriteTensorboardExperimentDataResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.write_tensorboard_experiment_data( + tensorboard_experiment="tensorboard_experiment_value", + write_run_data_requests=[ + tensorboard_service.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value" + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].tensorboard_experiment == "tensorboard_experiment_value" + assert args[0].write_run_data_requests == [ + tensorboard_service.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value" + ) + ] + + +@pytest.mark.asyncio +async def test_write_tensorboard_experiment_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.write_tensorboard_experiment_data( + tensorboard_service.WriteTensorboardExperimentDataRequest(), + tensorboard_experiment="tensorboard_experiment_value", + write_run_data_requests=[ + tensorboard_service.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value" + ) + ], + ) + + def test_write_tensorboard_run_data( transport: str = "grpc", request_type=tensorboard_service.WriteTensorboardRunDataRequest, @@ -7416,10 +8200,12 @@ def test_tensorboard_service_base_transport(): "list_tensorboard_experiments", "delete_tensorboard_experiment", "create_tensorboard_run", + "batch_create_tensorboard_runs", "get_tensorboard_run", "update_tensorboard_run", "list_tensorboard_runs", "delete_tensorboard_run", + "batch_create_tensorboard_time_series", "create_tensorboard_time_series", "get_tensorboard_time_series", "update_tensorboard_time_series", @@ -7427,6 +8213,7 @@ def test_tensorboard_service_base_transport(): "delete_tensorboard_time_series", "read_tensorboard_time_series_data", 
"read_tensorboard_blob_data", + "write_tensorboard_experiment_data", "write_tensorboard_run_data", "export_tensorboard_time_series_data", )