From ca813be08ec2620380b5a12b0d6cdc079e27ba79 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 2 Dec 2021 11:57:10 -0800 Subject: [PATCH] feat: add enable_private_service_connect field to Endpoint feat: add id field to DeployedModel feat: add service_attachment field to PrivateEndpoints feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint feat: add method... (#878) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add enable_private_service_connect field to Endpoint feat: add id field to DeployedModel feat: add service_attachment field to PrivateEndpoints feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint feat: add method signature to CreateFeatureStore, CreateEntityType, CreateFeature feat: add network and enable_private_service_connect to IndexEndpoint feat: add service_attachment to IndexPrivateEndpoints feat: add stratified_split field to training_pipeline InputDataConfig fix: remove invalid resource annotations in LineageSubgraph PiperOrigin-RevId: 413686247 Source-Link: https://github.com/googleapis/googleapis/commit/244a89dbd9c9da4b8ada601f0d8131f91f58d0f2 Source-Link: https://github.com/googleapis/googleapis-gen/commit/c485e44a1b2fef516e9bca36514d50cebd5ea51f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYzQ4NWU0NGExYjJmZWY1MTZlOWJjYTM2NTE0ZDUwY2ViZDVlYTUxZiJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../services/migration_service/client.py | 22 +- google/cloud/aiplatform_v1beta1/__init__.py | 12 + .../aiplatform_v1beta1/gapic_metadata.json | 10 + .../services/endpoint_service/async_client.py | 20 +- .../services/endpoint_service/client.py | 20 +- .../featurestore_service/async_client.py | 82 +++++- .../services/featurestore_service/client.py | 82 +++++- 
.../index_endpoint_service/async_client.py | 99 +++++++ .../services/index_endpoint_service/client.py | 99 +++++++ .../index_endpoint_service/transports/base.py | 14 + .../index_endpoint_service/transports/grpc.py | 29 ++ .../transports/grpc_asyncio.py | 30 +++ .../services/job_service/async_client.py | 3 +- .../services/job_service/client.py | 3 +- .../services/model_service/async_client.py | 11 +- .../services/model_service/client.py | 11 +- .../services/model_service/transports/grpc.py | 11 +- .../model_service/transports/grpc_asyncio.py | 11 +- .../services/pipeline_service/async_client.py | 2 +- .../services/pipeline_service/client.py | 2 +- .../pipeline_service/transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 2 +- .../prediction_service/async_client.py | 13 +- .../services/prediction_service/client.py | 13 +- .../prediction_service/transports/grpc.py | 13 +- .../transports/grpc_asyncio.py | 13 +- .../services/vizier_service/async_client.py | 12 +- .../services/vizier_service/client.py | 12 +- .../vizier_service/transports/grpc.py | 6 +- .../vizier_service/transports/grpc_asyncio.py | 6 +- .../aiplatform_v1beta1/types/__init__.py | 12 + .../aiplatform_v1beta1/types/artifact.py | 6 +- .../types/batch_prediction_job.py | 23 +- .../aiplatform_v1beta1/types/custom_job.py | 9 +- .../aiplatform_v1beta1/types/endpoint.py | 40 ++- .../types/endpoint_service.py | 11 + .../aiplatform_v1beta1/types/execution.py | 2 +- .../aiplatform_v1beta1/types/explanation.py | 41 +++ .../cloud/aiplatform_v1beta1/types/feature.py | 2 +- .../aiplatform_v1beta1/types/featurestore.py | 5 +- .../types/featurestore_online_service.py | 5 +- .../types/featurestore_service.py | 60 ++++- .../types/index_endpoint.py | 36 ++- .../types/index_endpoint_service.py | 57 ++++ .../aiplatform_v1beta1/types/job_service.py | 2 +- .../types/metadata_schema.py | 3 +- .../types/model_deployment_monitoring_job.py | 7 +- .../types/model_monitoring.py | 23 +- 
.../aiplatform_v1beta1/types/pipeline_job.py | 19 +- .../types/pipeline_service.py | 2 + .../types/prediction_service.py | 10 +- .../cloud/aiplatform_v1beta1/types/study.py | 32 +-- .../types/training_pipeline.py | 51 ++++ .../types/unmanaged_container_model.py | 52 ++++ .../aiplatform_v1/test_migration_service.py | 40 +-- .../test_endpoint_service.py | 24 +- .../test_featurestore_service.py | 36 ++- .../test_index_endpoint_service.py | 255 ++++++++++++++++++ .../aiplatform_v1beta1/test_job_service.py | 3 + 59 files changed, 1355 insertions(+), 178 deletions(-) create mode 100644 google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index 5a1e5f2c41..2379767e15 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -183,19 +183,16 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod @@ -215,16 +212,19 @@ def parse_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: 
str,) -> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 3c5353af83..801ae9f5b3 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -112,6 +112,7 @@ from .types.event import Event from .types.execution import Execution from .types.explanation import Attribution +from .types.explanation import BlurBaselineConfig from .types.explanation import Explanation from .types.explanation import ExplanationMetadataOverride from .types.explanation import ExplanationParameters @@ -189,6 +190,9 @@ from .types.index_endpoint_service import GetIndexEndpointRequest from .types.index_endpoint_service import ListIndexEndpointsRequest from .types.index_endpoint_service import ListIndexEndpointsResponse +from .types.index_endpoint_service import MutateDeployedIndexOperationMetadata +from .types.index_endpoint_service import MutateDeployedIndexRequest +from .types.index_endpoint_service import MutateDeployedIndexResponse from .types.index_endpoint_service import UndeployIndexOperationMetadata from .types.index_endpoint_service import UndeployIndexRequest from .types.index_endpoint_service import UndeployIndexResponse @@ -447,12 +451,14 @@ from .types.training_pipeline import FractionSplit from 
.types.training_pipeline import InputDataConfig from .types.training_pipeline import PredefinedSplit +from .types.training_pipeline import StratifiedSplit from .types.training_pipeline import TimestampSplit from .types.training_pipeline import TrainingPipeline from .types.types import BoolArray from .types.types import DoubleArray from .types.types import Int64Array from .types.types import StringArray +from .types.unmanaged_container_model import UnmanagedContainerModel from .types.user_action_reference import UserActionReference from .types.value import Value from .types.vizier_service import AddTrialMeasurementRequest @@ -529,6 +535,7 @@ "BatchReadTensorboardTimeSeriesDataResponse", "BigQueryDestination", "BigQuerySource", + "BlurBaselineConfig", "BoolArray", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", @@ -813,6 +820,9 @@ "ModelMonitoringObjectiveConfig", "ModelMonitoringStatsAnomalies", "ModelServiceClient", + "MutateDeployedIndexOperationMetadata", + "MutateDeployedIndexRequest", + "MutateDeployedIndexResponse", "NearestNeighborSearchOperationMetadata", "PauseModelDeploymentMonitoringJobRequest", "PipelineJob", @@ -866,6 +876,7 @@ "SpecialistPool", "SpecialistPoolServiceClient", "StopTrialRequest", + "StratifiedSplit", "StreamingReadFeatureValuesRequest", "StringArray", "Study", @@ -895,6 +906,7 @@ "UndeployModelOperationMetadata", "UndeployModelRequest", "UndeployModelResponse", + "UnmanagedContainerModel", "UpdateArtifactRequest", "UpdateContextRequest", "UpdateDatasetRequest", diff --git a/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/google/cloud/aiplatform_v1beta1/gapic_metadata.json index d6469e96ee..b584f16b81 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -481,6 +481,11 @@ "list_index_endpoints" ] }, + "MutateDeployedIndex": { + "methods": [ + "mutate_deployed_index" + ] + }, "UndeployIndex": { "methods": [ "undeploy_index" @@ -521,6 +526,11 @@ 
"list_index_endpoints" ] }, + "MutateDeployedIndex": { + "methods": [ + "mutate_deployed_index" + ] + }, "UndeployIndex": { "methods": [ "undeploy_index" diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index 41527577f7..9511c58a96 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -191,6 +191,7 @@ async def create_endpoint( *, parent: str = None, endpoint: gca_endpoint.Endpoint = None, + endpoint_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -214,6 +215,21 @@ async def create_endpoint( This corresponds to the ``endpoint`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + endpoint_id (:class:`str`): + Immutable. The ID to use for endpoint, which will become + the final component of the endpoint resource name. If + not provided, Vertex AI will generate a value for this + ID. + + This value should be 1-10 characters, and valid + characters are /[0-9]/. When using HTTP/JSON, this field + is populated based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields + that are not included in either the URI or the body. + + This corresponds to the ``endpoint_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -231,7 +247,7 @@ async def create_endpoint( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, endpoint]) + has_flattened_params = any([parent, endpoint, endpoint_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -246,6 +262,8 @@ async def create_endpoint( request.parent = parent if endpoint is not None: request.endpoint = endpoint + if endpoint_id is not None: + request.endpoint_id = endpoint_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py index eb4bdaf03c..247fc94dcd 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -424,6 +424,7 @@ def create_endpoint( *, parent: str = None, endpoint: gca_endpoint.Endpoint = None, + endpoint_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -447,6 +448,21 @@ def create_endpoint( This corresponds to the ``endpoint`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + endpoint_id (str): + Immutable. The ID to use for endpoint, which will become + the final component of the endpoint resource name. If + not provided, Vertex AI will generate a value for this + ID. + + This value should be 1-10 characters, and valid + characters are /[0-9]/. When using HTTP/JSON, this field + is populated based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields + that are not included in either the URI or the body. + + This corresponds to the ``endpoint_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. @@ -464,7 +480,7 @@ def create_endpoint( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, endpoint]) + has_flattened_params = any([parent, endpoint, endpoint_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -483,6 +499,8 @@ def create_endpoint( request.parent = parent if endpoint is not None: request.endpoint = endpoint + if endpoint_id is not None: + request.endpoint_id = endpoint_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py index 8038879ee8..cbe4cf700b 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -198,6 +198,7 @@ async def create_featurestore( *, parent: str = None, featurestore: gca_featurestore.Featurestore = None, + featurestore_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -222,6 +223,21 @@ async def create_featurestore( This corresponds to the ``featurestore`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + featurestore_id (:class:`str`): + Required. The ID to use for this Featurestore, which + will become the final component of the Featurestore's + resource name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within the project and + location. 
+ + This corresponds to the ``featurestore_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -232,16 +248,16 @@ async def create_featurestore( google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` - Featurestore configuration information on how the - Featurestore is configured. + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore]) + has_flattened_params = any([parent, featurestore, featurestore_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -256,6 +272,8 @@ async def create_featurestore( request.parent = parent if featurestore is not None: request.featurestore = featurestore + if featurestore_id is not None: + request.featurestore_id = featurestore_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -315,9 +333,11 @@ async def get_featurestore( Returns: google.cloud.aiplatform_v1beta1.types.Featurestore: - Featurestore configuration - information on how the Featurestore is - configured. + Vertex AI Feature Store provides a + centralized repository for organizing, + storing, and serving ML features. 
The + Featurestore is a top-level container + for your features and their values. """ # Create or coerce a protobuf request object. @@ -490,10 +510,10 @@ async def update_featurestore( google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` - Featurestore configuration information on how the - Featurestore is configured. + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. """ # Create or coerce a protobuf request object. @@ -659,6 +679,7 @@ async def create_entity_type( *, parent: str = None, entity_type: gca_entity_type.EntityType = None, + entity_type_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -682,6 +703,20 @@ async def create_entity_type( This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + entity_type_id (:class:`str`): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within a featurestore. + + This corresponds to the ``entity_type_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -701,7 +736,7 @@ async def create_entity_type( # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, entity_type]) + has_flattened_params = any([parent, entity_type, entity_type_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -716,6 +751,8 @@ async def create_entity_type( request.parent = parent if entity_type is not None: request.entity_type = entity_type + if entity_type_id is not None: + request.entity_type_id = entity_type_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1115,6 +1152,7 @@ async def create_feature( *, parent: str = None, feature: gca_feature.Feature = None, + feature_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1138,6 +1176,20 @@ async def create_feature( This corresponds to the ``feature`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + feature_id (:class:`str`): + Required. The ID to use for the Feature, which will + become the final component of the Feature's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within an EntityType. + + This corresponds to the ``feature_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1156,7 +1208,7 @@ async def create_feature( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, feature]) + has_flattened_params = any([parent, feature, feature_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1171,6 +1223,8 @@ async def create_feature( request.parent = parent if feature is not None: request.feature = feature + if feature_id is not None: + request.feature_id = feature_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py index 562cd5a808..a149a9c488 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py @@ -426,6 +426,7 @@ def create_featurestore( *, parent: str = None, featurestore: gca_featurestore.Featurestore = None, + featurestore_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -450,6 +451,21 @@ def create_featurestore( This corresponds to the ``featurestore`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + featurestore_id (str): + Required. The ID to use for this Featurestore, which + will become the final component of the Featurestore's + resource name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within the project and + location. + + This corresponds to the ``featurestore_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -460,16 +476,16 @@ def create_featurestore( google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` - Featurestore configuration information on how the - Featurestore is configured. + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore]) + has_flattened_params = any([parent, featurestore, featurestore_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -488,6 +504,8 @@ def create_featurestore( request.parent = parent if featurestore is not None: request.featurestore = featurestore + if featurestore_id is not None: + request.featurestore_id = featurestore_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -543,9 +561,11 @@ def get_featurestore( Returns: google.cloud.aiplatform_v1beta1.types.Featurestore: - Featurestore configuration - information on how the Featurestore is - configured. + Vertex AI Feature Store provides a + centralized repository for organizing, + storing, and serving ML features. The + Featurestore is a top-level container + for your features and their values. """ # Create or coerce a protobuf request object. @@ -718,10 +738,10 @@ def update_featurestore( google.api_core.operation.Operation: An object representing a long-running operation. 
- The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` - Featurestore configuration information on how the - Featurestore is configured. + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. """ # Create or coerce a protobuf request object. @@ -887,6 +907,7 @@ def create_entity_type( *, parent: str = None, entity_type: gca_entity_type.EntityType = None, + entity_type_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -910,6 +931,20 @@ def create_entity_type( This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + entity_type_id (str): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within a featurestore. + + This corresponds to the ``entity_type_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -929,7 +964,7 @@ def create_entity_type( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, entity_type]) + has_flattened_params = any([parent, entity_type, entity_type_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -948,6 +983,8 @@ def create_entity_type( request.parent = parent if entity_type is not None: request.entity_type = entity_type + if entity_type_id is not None: + request.entity_type_id = entity_type_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1343,6 +1380,7 @@ def create_feature( *, parent: str = None, feature: gca_feature.Feature = None, + feature_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1366,6 +1404,20 @@ def create_feature( This corresponds to the ``feature`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + feature_id (str): + Required. The ID to use for the Feature, which will + become the final component of the Feature's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within an EntityType. + + This corresponds to the ``feature_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1384,7 +1436,7 @@ def create_feature( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, feature]) + has_flattened_params = any([parent, feature, feature_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1403,6 +1455,8 @@ def create_feature( request.parent = parent if feature is not None: request.feature = feature + if feature_id is not None: + request.feature_id = feature_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py index 50fe8ae2ad..a1e5aa81b8 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -798,6 +798,105 @@ async def undeploy_index( # Done; return the response. return response + async def mutate_deployed_index( + self, + request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Update an existing DeployedIndex under an + IndexEndpoint. + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. 
Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (:class:`google.cloud.aiplatform_v1beta1.types.DeployedIndex`): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexResponse` + Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = index_endpoint_service.MutateDeployedIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_deployed_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint", request.index_endpoint),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.MutateDeployedIndexResponse, + metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, + ) + + # Done; return the response. + return response + async def __aenter__(self): return self diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py index c93a0cd4e3..49f4dc9da6 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py @@ -1003,6 +1003,105 @@ def undeploy_index( # Done; return the response. return response + def mutate_deployed_index( + self, + request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Update an existing DeployedIndex under an + IndexEndpoint. 
+ + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexResponse` + Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.MutateDeployedIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.MutateDeployedIndexRequest): + request = index_endpoint_service.MutateDeployedIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.mutate_deployed_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint", request.index_endpoint),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.MutateDeployedIndexResponse, + metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self): return self diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py index ad234b4d93..729e32879b 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py @@ -152,6 +152,11 @@ def _prep_wrapped_messages(self, client_info): self.undeploy_index: gapic_v1.method.wrap_method( self.undeploy_index, default_timeout=5.0, client_info=client_info, ), + self.mutate_deployed_index: gapic_v1.method.wrap_method( + self.mutate_deployed_index, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -237,5 +242,14 @@ def undeploy_index( ]: raise NotImplementedError() + @property + def mutate_deployed_index( + self, + ) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + __all__ = ("IndexEndpointServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py index 59eff52bc4..5704bc41f4 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py @@ -446,6 +446,35 @@ def undeploy_index( ) return self._stubs["undeploy_index"] + @property + def mutate_deployed_index( + self, + ) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], operations_pb2.Operation + ]: + r"""Return a callable for the mutate deployed index method over gRPC. + + Update an existing DeployedIndex under an + IndexEndpoint. 
+ + Returns: + Callable[[~.MutateDeployedIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_deployed_index" not in self._stubs: + self._stubs["mutate_deployed_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/MutateDeployedIndex", + request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["mutate_deployed_index"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py index 2aa9a4765e..e8b2c2ccaf 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -454,6 +454,36 @@ def undeploy_index( ) return self._stubs["undeploy_index"] + @property + def mutate_deployed_index( + self, + ) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the mutate deployed index method over gRPC. + + Update an existing DeployedIndex under an + IndexEndpoint. + + Returns: + Callable[[~.MutateDeployedIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "mutate_deployed_index" not in self._stubs: + self._stubs["mutate_deployed_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/MutateDeployedIndex", + request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["mutate_deployed_index"] + def close(self): return self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index 4aae09222b..6022a7c16a 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -63,6 +63,7 @@ from google.cloud.aiplatform_v1beta1.types import model_monitoring from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model from google.protobuf import duration_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -1976,7 +1977,7 @@ async def search_model_deployment_monitoring_stats_anomalies( should not be set. deployed_model_id (:class:`str`): Required. The DeployedModel ID of the - [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. 
This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/google/cloud/aiplatform_v1beta1/services/job_service/client.py index 6a3ab9b01c..57d840f33e 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -66,6 +66,7 @@ from google.cloud.aiplatform_v1beta1.types import model_monitoring from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model from google.protobuf import duration_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -2337,7 +2338,7 @@ def search_model_deployment_monitoring_stats_anomalies( should not be set. deployed_model_id (str): Required. The DeployedModel ID of the - [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index f8a3d626e7..028e8bb41a 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -528,8 +528,13 @@ async def delete_model( ) -> operation_async.AsyncOperation: r"""Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. 
+ A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. Args: request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]): @@ -623,7 +628,7 @@ async def export_model( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Exports a trained, exportable, Model to a location specified by + r"""Exports a trained, exportable Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py index 0952ab848e..1ab512337a 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -780,8 +780,13 @@ def delete_model( ) -> gac_operation.Operation: r"""Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. Args: request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]): @@ -875,7 +880,7 @@ def export_model( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gac_operation.Operation: - r"""Exports a trained, exportable, Model to a location specified by + r"""Exports a trained, exportable Model to a location specified by the user. 
A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py index 5dc6a638aa..4e4d32fe03 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -356,8 +356,13 @@ def delete_model( Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. Returns: Callable[[~.DeleteModelRequest], @@ -383,7 +388,7 @@ def export_model( ) -> Callable[[model_service.ExportModelRequest], operations_pb2.Operation]: r"""Return a callable for the export model method over gRPC. - Exports a trained, exportable, Model to a location specified by + Exports a trained, exportable Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py index 0ac844c007..f3fa67b56d 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -368,8 +368,13 @@ def delete_model( Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. 
+ A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. Returns: Callable[[~.DeleteModelRequest], @@ -397,7 +402,7 @@ def export_model( ]: r"""Return a callable for the export model method over gRPC. - Exports a trained, exportable, Model to a location specified by + Exports a trained, exportable Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index fdcd22c07c..c31048b9eb 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -57,7 +57,7 @@ class PipelineServiceAsyncClient: """A service for creating and managing Vertex AI's pipelines. This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). """ diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py index 593e2b7edb..35c2ffab76 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -93,7 +93,7 @@ def get_transport_class(cls, label: str = None,) -> Type[PipelineServiceTranspor class PipelineServiceClient(metaclass=PipelineServiceClientMeta): """A service for creating and managing Vertex AI's pipelines. 
This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). """ diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py index 01002e8b08..372e193e47 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -42,7 +42,7 @@ class PipelineServiceGrpcTransport(PipelineServiceTransport): A service for creating and managing Vertex AI's pipelines. This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). This class defines the same methods as the primary client, so the diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py index 20e90dd7fa..d5c7e82b30 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py @@ -43,7 +43,7 @@ class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): A service for creating and managing Vertex AI's pipelines. This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). 
This class defines the same methods as the primary client, so the diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index 85bd6291ce..fd0677b922 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -286,8 +286,17 @@ async def raw_predict( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> httpbody_pb2.HttpBody: - r"""Perform an online prediction with arbitrary http - payload. + r"""Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. Args: request (Union[google.cloud.aiplatform_v1beta1.types.RawPredictRequest, dict]): diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index d8e8694054..020817c986 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -494,8 +494,17 @@ def raw_predict( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> httpbody_pb2.HttpBody: - r"""Perform an online prediction with arbitrary http - payload. + r"""Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. 
+ + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. Args: request (Union[google.cloud.aiplatform_v1beta1.types.RawPredictRequest, dict]): diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index 298ab7d052..e911abba1e 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -260,8 +260,17 @@ def raw_predict( ) -> Callable[[prediction_service.RawPredictRequest], httpbody_pb2.HttpBody]: r"""Return a callable for the raw predict method over gRPC. - Perform an online prediction with arbitrary http - payload. + Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. Returns: Callable[[~.RawPredictRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py index af62a6feb0..4c288670dc 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -265,8 +265,17 @@ def raw_predict( ]: r"""Return a callable for the raw predict method over gRPC. - Perform an online prediction with arbitrary http - payload. + Perform an online prediction with an arbitrary HTTP payload. 
+ + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. Returns: Callable[[~.RawPredictRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py index aa54f6bab5..9ba5f46b14 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py @@ -44,8 +44,8 @@ class VizierServiceAsyncClient: - """Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + """Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. """ @@ -219,7 +219,9 @@ async def create_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -290,7 +292,9 @@ async def get_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -508,7 +512,9 @@ async def lookup_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have @@ -556,7 +562,7 @@ async def suggest_trials( metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py index 818462a4ad..94f0134400 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py @@ -78,8 +78,8 @@ def get_transport_class(cls, label: str = None,) -> Type[VizierServiceTransport] class VizierServiceClient(metaclass=VizierServiceClientMeta): - """Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + """Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. """ @@ -437,7 +437,9 @@ def create_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -508,7 +510,9 @@ def get_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -726,7 +730,9 @@ def lookup_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. 
+ """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -774,7 +780,7 @@ def suggest_trials( metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py index 0c4d387596..5cf7cbaee1 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py @@ -36,8 +36,8 @@ class VizierServiceGrpcTransport(VizierServiceTransport): """gRPC backend transport for VizierService. - Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. @@ -388,7 +388,7 @@ def suggest_trials( r"""Return a callable for the suggest trials method over gRPC. Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. 
diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py index 2168a033d2..7fdb740e87 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py @@ -37,8 +37,8 @@ class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): """gRPC AsyncIO backend transport for VizierService. - Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. @@ -397,7 +397,7 @@ def suggest_trials( r"""Return a callable for the suggest trials method over gRPC. Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. 
diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index b5ce0f36fd..fcd3ca5a42 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -88,6 +88,7 @@ from .execution import Execution from .explanation import ( Attribution, + BlurBaselineConfig, Explanation, ExplanationMetadataOverride, ExplanationParameters, @@ -175,6 +176,9 @@ GetIndexEndpointRequest, ListIndexEndpointsRequest, ListIndexEndpointsResponse, + MutateDeployedIndexOperationMetadata, + MutateDeployedIndexRequest, + MutateDeployedIndexResponse, UndeployIndexOperationMetadata, UndeployIndexRequest, UndeployIndexResponse, @@ -461,6 +465,7 @@ FractionSplit, InputDataConfig, PredefinedSplit, + StratifiedSplit, TimestampSplit, TrainingPipeline, ) @@ -470,6 +475,7 @@ Int64Array, StringArray, ) +from .unmanaged_container_model import UnmanagedContainerModel from .user_action_reference import UserActionReference from .value import Value from .vizier_service import ( @@ -561,6 +567,7 @@ "Event", "Execution", "Attribution", + "BlurBaselineConfig", "Explanation", "ExplanationMetadataOverride", "ExplanationParameters", @@ -638,6 +645,9 @@ "GetIndexEndpointRequest", "ListIndexEndpointsRequest", "ListIndexEndpointsResponse", + "MutateDeployedIndexOperationMetadata", + "MutateDeployedIndexRequest", + "MutateDeployedIndexResponse", "UndeployIndexOperationMetadata", "UndeployIndexRequest", "UndeployIndexResponse", @@ -888,12 +898,14 @@ "FractionSplit", "InputDataConfig", "PredefinedSplit", + "StratifiedSplit", "TimestampSplit", "TrainingPipeline", "BoolArray", "DoubleArray", "Int64Array", "StringArray", + "UnmanagedContainerModel", "UserActionReference", "Value", "AddTrialMeasurementRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/artifact.py b/google/cloud/aiplatform_v1beta1/types/artifact.py index 31657f0f31..d70d6ace76 100644 --- 
a/google/cloud/aiplatform_v1beta1/types/artifact.py +++ b/google/cloud/aiplatform_v1beta1/types/artifact.py @@ -62,9 +62,9 @@ class Artifact(proto.Message): The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. This property is - managed by clients (such as Vertex Pipelines), - and the system does not prescribe or check the - validity of state transitions. + managed by clients (such as Vertex AI + Pipelines), and the system does not prescribe or + check the validity of state transitions. schema_title (str): The title of the schema describing the metadata. diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index 0f8d9cb573..80829d444e 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -26,6 +26,9 @@ from google.cloud.aiplatform_v1beta1.types import ( manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters, ) +from google.cloud.aiplatform_v1beta1.types import ( + unmanaged_container_model as gca_unmanaged_container_model, +) from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore @@ -53,11 +56,16 @@ class BatchPredictionJob(proto.Message): Required. The user-defined name of this BatchPredictionJob. model (str): - Required. The name of the Model that produces - the predictions via this job, must share the - same ancestor Location. Starting this job has no - impact on any existing deployments of the Model - and their resources. + The name of the Model resource that produces the predictions + via this job, must share the same ancestor Location. + Starting this job has no impact on any existing deployments + of the Model and their resources. Exactly one of model and + unmanaged_container_model must be set.
+ unmanaged_container_model (google.cloud.aiplatform_v1beta1.types.UnmanagedContainerModel): + Contains model information necessary to perform batch + prediction without requiring uploading to model registry. + Exactly one of model and unmanaged_container_model must be + set. input_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.InputConfig): Required. Input configuration of the instances on which predictions are performed. The schema of any single instance @@ -362,6 +370,11 @@ class OutputInfo(proto.Message): name = proto.Field(proto.STRING, number=1,) display_name = proto.Field(proto.STRING, number=2,) model = proto.Field(proto.STRING, number=3,) + unmanaged_container_model = proto.Field( + proto.MESSAGE, + number=28, + message=gca_unmanaged_container_model.UnmanagedContainerModel, + ) input_config = proto.Field(proto.MESSAGE, number=4, message=InputConfig,) model_parameters = proto.Field(proto.MESSAGE, number=5, message=struct_pb2.Value,) output_config = proto.Field(proto.MESSAGE, number=6, message=OutputConfig,) diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index dbd7c960d7..7ec2a92c03 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -147,9 +147,12 @@ class CustomJobSpec(proto.Message): {project} is a project number, as in ``12345``, and {network} is a network name. - Private services access must already be configured for the - network. If left unspecified, the job is not peered with any - network. + To specify this field, you must have already `configured VPC + Network Peering for Vertex + AI `__. + + If this field is left unspecified, the job is not peered + with any network. base_output_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. 
For diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint.py b/google/cloud/aiplatform_v1beta1/types/endpoint.py index 7f04d9907c..4e6981e44e 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -85,17 +85,31 @@ class Endpoint(proto.Message): this key. network (str): The full name of the Google Compute Engine - `network `__ + `network `__ to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.Endpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect], + can be set. + `Format `__: - projects/{project}/global/networks/{network}. Where - {project} is a project number, as in '12345', and {network} - is network name. + ``projects/{project}/global/networks/{network}``. Where + ``{project}`` is a project number, as in ``12345``, and + ``{network}`` is network name. + enable_private_service_connect (bool): + If true, expose the Endpoint via private service connect. + + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.Endpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect], + can be set. model_deployment_monitoring_job (str): Output only. 
Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by @@ -118,6 +132,7 @@ class Endpoint(proto.Message): proto.MESSAGE, number=10, message=gca_encryption_spec.EncryptionSpec, ) network = proto.Field(proto.STRING, number=13,) + enable_private_service_connect = proto.Field(proto.BOOL, number=17,) model_deployment_monitoring_job = proto.Field(proto.STRING, number=14,) @@ -146,7 +161,11 @@ class DeployedModel(proto.Message): This field is a member of `oneof`_ ``prediction_resources``. id (str): - Output only. The ID of the DeployedModel. + Immutable. The ID of the DeployedModel. If not provided upon + deployment, Vertex AI will generate a value for this ID. + + This value should be 1-10 characters, and valid characters + are /[0-9]/. model (str): Required. The name of the Model that this is the deployment of. Note that the Model may be in @@ -239,8 +258,10 @@ class DeployedModel(proto.Message): class PrivateEndpoints(proto.Message): - r"""PrivateEndpoints is used to provide paths for users to send - requests via private services access. + r"""PrivateEndpoints proto is used to provide paths for users to send + requests privately. To send request via private service access, use + predict_http_uri, explain_http_uri or health_http_uri. To send + request via private service connect, use service_attachment. Attributes: predict_http_uri (str): @@ -252,11 +273,16 @@ class PrivateEndpoints(proto.Message): health_http_uri (str): Output only. Http(s) path to send health check requests. + service_attachment (str): + Output only. The name of the service + attachment resource. Populated if private + service connect is enabled. 
""" predict_http_uri = proto.Field(proto.STRING, number=1,) explain_http_uri = proto.Field(proto.STRING, number=2,) health_http_uri = proto.Field(proto.STRING, number=3,) + service_attachment = proto.Field(proto.STRING, number=4,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py index 94b28acec2..a21d1ca933 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -51,10 +51,21 @@ class CreateEndpointRequest(proto.Message): ``projects/{project}/locations/{location}`` endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): Required. The Endpoint to create. + endpoint_id (str): + Immutable. The ID to use for endpoint, which will become the + final component of the endpoint resource name. If not + provided, Vertex AI will generate a value for this ID. + + This value should be 1-10 characters, and valid characters + are /[0-9]/. When using HTTP/JSON, this field is populated + based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields that + are not included in either the URI or the body. """ parent = proto.Field(proto.STRING, number=1,) endpoint = proto.Field(proto.MESSAGE, number=2, message=gca_endpoint.Endpoint,) + endpoint_id = proto.Field(proto.STRING, number=4,) class CreateEndpointOperationMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/execution.py b/google/cloud/aiplatform_v1beta1/types/execution.py index 3dd91ffa6b..85b824ac50 100644 --- a/google/cloud/aiplatform_v1beta1/types/execution.py +++ b/google/cloud/aiplatform_v1beta1/types/execution.py @@ -38,7 +38,7 @@ class Execution(proto.Message): The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. 
This property is - managed by clients (such as Vertex Pipelines) + managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. etag (str): diff --git a/google/cloud/aiplatform_v1beta1/types/explanation.py b/google/cloud/aiplatform_v1beta1/types/explanation.py index 4d55149e34..2972aa2183 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation.py @@ -33,6 +33,7 @@ "XraiAttribution", "SmoothGradConfig", "FeatureNoiseSigma", + "BlurBaselineConfig", "Similarity", "ExplanationSpecOverride", "ExplanationMetadataOverride", @@ -389,12 +390,22 @@ class IntegratedGradientsAttribution(proto.Message): help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + blur_baseline_config (google.cloud.aiplatform_v1beta1.types.BlurBaselineConfig): + Config for IG with blur baseline. + When enabled, a linear path from the maximally + blurred image to the input image is created. + Using a blurred baseline instead of zero (black + image) is motivated by the BlurIG approach + explained here: https://arxiv.org/abs/2004.03383 """ step_count = proto.Field(proto.INT32, number=1,) smooth_grad_config = proto.Field( proto.MESSAGE, number=2, message="SmoothGradConfig", ) + blur_baseline_config = proto.Field( + proto.MESSAGE, number=3, message="BlurBaselineConfig", + ) class XraiAttribution(proto.Message): @@ -422,12 +433,22 @@ class XraiAttribution(proto.Message): help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + blur_baseline_config (google.cloud.aiplatform_v1beta1.types.BlurBaselineConfig): + Config for XRAI with blur baseline. + When enabled, a linear path from the maximally + blurred image to the input image is created. 
+ Using a blurred baseline instead of zero (black + image) is motivated by the BlurIG approach + explained here: https://arxiv.org/abs/2004.03383 """ step_count = proto.Field(proto.INT32, number=1,) smooth_grad_config = proto.Field( proto.MESSAGE, number=2, message="SmoothGradConfig", ) + blur_baseline_config = proto.Field( + proto.MESSAGE, number=3, message="BlurBaselineConfig", + ) class SmoothGradConfig(proto.Message): @@ -528,6 +549,26 @@ class NoiseSigmaForFeature(proto.Message): ) +class BlurBaselineConfig(proto.Message): + r"""Config for blur baseline. + When enabled, a linear path from the maximally blurred image to + the input image is created. Using a blurred baseline instead of + zero (black image) is motivated by the BlurIG approach explained + here: + https://arxiv.org/abs/2004.03383 + + Attributes: + max_blur_sigma (float): + The standard deviation of the blur kernel for + the blurred baseline. The same blurring + parameter is used for both the height and the + width dimension. If not set, the method defaults + to the zero (i.e. black for images) baseline. + """ + + max_blur_sigma = proto.Field(proto.FLOAT, number=1,) + + class Similarity(proto.Message): r"""Similarity explainability that returns the nearest neighbors from the provided dataset. diff --git a/google/cloud/aiplatform_v1beta1/types/feature.py b/google/cloud/aiplatform_v1beta1/types/feature.py index 1d8ec6500f..7e056694fe 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature.py +++ b/google/cloud/aiplatform_v1beta1/types/feature.py @@ -85,7 +85,7 @@ class Feature(proto.Message): the EntityType's this Feature belongs to. monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]): Output only. 
A list of historical [Snapshot - Analysis][google.cloud.aiplatform.master.FeaturestoreMonitoringConfig.SnapshotAnalysis] + Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis] stats requested by user, sorted by [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time] descending. diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore.py b/google/cloud/aiplatform_v1beta1/types/featurestore.py index a15904b9af..203dfc4a68 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore.py @@ -25,8 +25,9 @@ class Featurestore(proto.Message): - r"""Featurestore configuration information on how the - Featurestore is configured. + r"""Vertex AI Feature Store provides a centralized repository for + organizing, storing, and serving ML features. The Featurestore + is a top-level container for your features and their values. Attributes: name (str): diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py index 12b03262a9..25a42f0bff 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py @@ -258,7 +258,10 @@ class Metadata(proto.Message): is provided by user at feature ingestion time. If not, feature store will use the system timestamp when the data is ingested into feature - store. + store. For streaming ingestion, the time, + aligned by days, must be no older than five + years (1825 days) and no later than one year + (366 days) in the future. 
""" generate_time = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py index 9076501474..f5c20abdd4 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -509,13 +509,22 @@ class ExportFeatureValuesRequest(proto.Message): r"""Request message for [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: snapshot_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.SnapshotExport): - Exports Feature values of all entities of the - EntityType as of a snapshot time. + Exports the latest Feature values of all + entities of the EntityType within a time range. + + This field is a member of `oneof`_ ``mode``. + full_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.FullExport): + Exports all historical values of all entities + of the EntityType within a time range This field is a member of `oneof`_ ``mode``. entity_type (str): @@ -533,8 +542,8 @@ class ExportFeatureValuesRequest(proto.Message): """ class SnapshotExport(proto.Message): - r"""Describes exporting Feature values as of the snapshot - timestamp. + r"""Describes exporting the latest Feature values of all entities of the + EntityType between [start_time, snapshot_time]. Attributes: snapshot_time (google.protobuf.timestamp_pb2.Timestamp): @@ -542,15 +551,52 @@ class SnapshotExport(proto.Message): If not set, retrieve values as of now. 
Timestamp, if present, must not have higher than millisecond precision. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Excludes Feature values with feature + generation timestamp before this timestamp. If + not set, retrieve oldest values kept in Feature + Store. Timestamp, if present, must not have + higher than millisecond precision. """ snapshot_time = proto.Field( proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, ) + start_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + + class FullExport(proto.Message): + r"""Describes exporting all historical Feature values of all entities of + the EntityType between [start_time, end_time]. + + Attributes: + start_time (google.protobuf.timestamp_pb2.Timestamp): + Excludes Feature values with feature + generation timestamp before this timestamp. If + not set, retrieve oldest values kept in Feature + Store. Timestamp, if present, must not have + higher than millisecond precision. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Exports Feature values as of this timestamp. + If not set, retrieve values as of now. + Timestamp, if present, must not have higher than + millisecond precision. + """ + + start_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, + ) snapshot_export = proto.Field( proto.MESSAGE, number=3, oneof="mode", message=SnapshotExport, ) + full_export = proto.Field( + proto.MESSAGE, number=7, oneof="mode", message=FullExport, + ) entity_type = proto.Field(proto.STRING, number=1,) destination = proto.Field( proto.MESSAGE, number=4, message="FeatureValueDestination", @@ -1216,17 +1262,17 @@ class UpdateFeaturestoreOperationMetadata(proto.Message): class ImportFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that perform import feature values. + r"""Details of operations that perform import Feature values. 
Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Featurestore import - feature values. + Feature values. imported_entity_count (int): Number of entities that have been imported by the operation. imported_feature_value_count (int): - Number of feature values that have been + Number of Feature values that have been imported by the operation. invalid_row_count (int): The number of rows in input source that weren't imported due diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py index 0739c547c8..c423957375 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py @@ -73,8 +73,7 @@ class IndexEndpoint(proto.Message): of the original Indexes they are the deployments of. network (str): - Required. Immutable. The full name of the Google Compute - Engine + Optional. The full name of the Google Compute Engine `network `__ to which the IndexEndpoint should be peered. @@ -82,10 +81,25 @@ class IndexEndpoint(proto.Message): network. If left unspecified, the Endpoint is not peered with any network. + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect], + can be set. + `Format `__: projects/{project}/global/networks/{network}. Where {project} is a project number, as in '12345', and {network} is network name. + enable_private_service_connect (bool): + Optional. If true, expose the IndexEndpoint via private + service connect. + + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect], + can be set. 
""" name = proto.Field(proto.STRING, number=1,) @@ -99,6 +113,7 @@ class IndexEndpoint(proto.Message): create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,) update_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,) network = proto.Field(proto.STRING, number=9,) + enable_private_service_connect = proto.Field(proto.BOOL, number=10,) class DeployedIndex(proto.Message): @@ -152,11 +167,10 @@ class DeployedIndex(proto.Message): Optional. A description of resources that the DeployedIndex uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If - min_replica_count is not set, the default value is 1. If + min_replica_count is not set, the default value is 2 (we + don't provide SLA when min_replica_count=1). If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. - The user is billed for the resources (at least their minimal - amount) even if the DeployedIndex receives no traffic. enable_access_logging (bool): Optional. If true, private endpoint's access logs are sent to StackDriver Logging. @@ -256,16 +270,24 @@ class AuthProvider(proto.Message): class IndexPrivateEndpoints(proto.Message): - r"""IndexPrivateEndpoints proto is used to provide paths for - users to send requests via private services access. + r"""IndexPrivateEndpoints proto is used to provide paths for users to + send requests via private endpoints (e.g. private service access, + private service connect). To send request via private service + access, use match_grpc_address. To send request via private service + connect, use service_attachment. Attributes: match_grpc_address (str): Output only. The ip address used to send match gRPC requests. + service_attachment (str): + Output only. The name of the service + attachment resource. Populated if private + service connect is enabled. 
""" match_grpc_address = proto.Field(proto.STRING, number=1,) + service_attachment = proto.Field(proto.STRING, number=2,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py index 69840b8899..fa8928ca4f 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py @@ -36,6 +36,9 @@ "UndeployIndexRequest", "UndeployIndexResponse", "UndeployIndexOperationMetadata", + "MutateDeployedIndexRequest", + "MutateDeployedIndexResponse", + "MutateDeployedIndexOperationMetadata", }, ) @@ -289,4 +292,58 @@ class UndeployIndexOperationMetadata(proto.Message): ) +class MutateDeployedIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource into which + to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + """ + + index_endpoint = proto.Field(proto.STRING, number=1,) + deployed_index = proto.Field( + proto.MESSAGE, number=2, message=gca_index_endpoint.DeployedIndex, + ) + + +class MutateDeployedIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + The DeployedIndex that had been updated in + the IndexEndpoint. 
+ """ + + deployed_index = proto.Field( + proto.MESSAGE, number=1, message=gca_index_endpoint.DeployedIndex, + ) + + +class MutateDeployedIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + deployed_index_id (str): + The unique index id specified by user + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + deployed_index_id = proto.Field(proto.STRING, number=2,) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/job_service.py b/google/cloud/aiplatform_v1beta1/types/job_service.py index 4d34ca0100..0988a81f3c 100644 --- a/google/cloud/aiplatform_v1beta1/types/job_service.py +++ b/google/cloud/aiplatform_v1beta1/types/job_service.py @@ -633,7 +633,7 @@ class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} deployed_model_id (str): Required. The DeployedModel ID of the - [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. feature_display_name (str): The feature display name. If specified, only return the stats belonging to this feature. Format: diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_schema.py b/google/cloud/aiplatform_v1beta1/types/metadata_schema.py index 34cca83420..69642111fb 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_schema.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_schema.py @@ -34,7 +34,8 @@ class MetadataSchema(proto.Message): The version of the MetadataSchema. 
The version's format must match the following regular expression: ``^[0-9]+[.][0-9]+[.][0-9]+$``, which would allow to - order/compare different versions.Example: 1.0.0, 1.0.1, etc. + order/compare different versions. Example: 1.0.0, 1.0.1, + etc. schema (str): Required. The raw YAML string representation of the MetadataSchema. The combination of [MetadataSchema.version] diff --git a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py index 5bf590b7c1..b9a106c639 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -162,9 +162,10 @@ class ModelDeploymentMonitoringJob(proto.Message): resources of this ModelDeploymentMonitoringJob will be secured by this key. enable_monitoring_pipeline_logs (bool): - If true, the scheduled monitoring pipeline status logs are - sent to Google Cloud Logging. Please note the logs incur - cost, which are subject to `Cloud Logging + If true, the scheduled monitoring pipeline logs are sent to + Google Cloud Logging, including pipeline status and + anomalies detected. Please note the logs incur cost, which + are subject to `Cloud Logging pricing `__. error (google.rpc.status_pb2.Status): Output only. Only populated when the job's state is diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring.py index 05937fd16b..760ac63c2f 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_monitoring.py +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring.py @@ -44,8 +44,8 @@ class ModelMonitoringObjectiveConfig(proto.Message): prediction_drift_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig): The config for drift of prediction data. 
explanation_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.ExplanationConfig): - The config for integrated with Explainable - AI. + The config for integrating with Vertex + Explainable AI. """ class TrainingDataset(proto.Message): @@ -160,14 +160,14 @@ class PredictionDriftDetectionConfig(proto.Message): ) class ExplanationConfig(proto.Message): - r"""The config for integrated with Explainable AI. Only applicable if - the Model has explanation_spec populated. + r"""The config for integrating with Vertex Explainable AI. Only + applicable if the Model has explanation_spec populated. Attributes: enable_feature_attributes (bool): - If want to analyze the Explainable AI feature - attribute scores or not. If set to true, Vertex - AI will log the feature attributions from + If want to analyze the Vertex Explainable AI + feature attribute scores or not. If set to true, + Vertex AI will log the feature attributions from explain response and do the skew/drift detection for them. explanation_baseline (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline): @@ -246,7 +246,7 @@ class PredictionFormat(proto.Enum): class ModelMonitoringAlertConfig(proto.Message): - r"""Next ID: 2 + r"""Next ID: 3 .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -255,6 +255,12 @@ class ModelMonitoringAlertConfig(proto.Message): Email alert config. This field is a member of `oneof`_ ``alert``. + enable_logging (bool): + Dump the anomalies to Cloud Logging. The anomalies will be + put to json payload encoded from proto + [google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry][]. + This can be further routed to Pub/Sub or any other services + supported by Cloud Logging.
""" class EmailAlertConfig(proto.Message): @@ -270,6 +276,7 @@ class EmailAlertConfig(proto.Message): email_alert_config = proto.Field( proto.MESSAGE, number=1, oneof="alert", message=EmailAlertConfig, ) + enable_logging = proto.Field(proto.BOOL, number=2,) class ThresholdConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py index 58e8dce723..c9c7d6d861 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py @@ -116,11 +116,16 @@ class RuntimeConfig(proto.Message): Attributes: parameters (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig.ParametersEntry]): - Deprecated. Use [RuntimeConfig.parameter_values] instead. - The runtime parameters of the PipelineJob. The parameters - will be passed into + Deprecated. Use + [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1beta1.PipelineJob.RuntimeConfig.parameter_values] + instead. The runtime parameters of the PipelineJob. The + parameters will be passed into [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] - to replace the placeholders at runtime. + to replace the placeholders at runtime. This field is used + by pipelines built using + ``PipelineJob.pipeline_spec.schema_version`` 2.0.0 or lower, + such as pipelines built using Kubeflow Pipelines SDK 1.8 or + lower. gcs_output_directory (str): Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is @@ -134,7 +139,11 @@ class RuntimeConfig(proto.Message): The runtime parameters of the PipelineJob. The parameters will be passed into [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] - to replace the placeholders at runtime. + to replace the placeholders at runtime. 
This field is used + by pipelines built using + ``PipelineJob.pipeline_spec.schema_version`` 2.1.0, such as + pipelines built using Kubeflow Pipelines SDK 1.9 or higher + and the v2 DSL. """ parameters = proto.MapField( diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py index e2dc3139b6..902bee0ba8 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -229,6 +229,7 @@ class ListPipelineJobsRequest(proto.Message): comparisons, and ``:`` wildcard. for example, can check if pipeline's display_name contains *step* by doing display_name:"*step*" + - ``state``: Supports ``=`` and ``!=`` comparisons. - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 format. @@ -278,6 +279,7 @@ class ListPipelineJobsRequest(proto.Message): - ``create_time`` - ``update_time`` - ``end_time`` + - ``start_time`` """ parent = proto.Field(proto.STRING, number=1,) diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index 4b258f6fa6..45c6ac3206 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -83,14 +83,14 @@ class PredictResponse(proto.Message): ID of the Endpoint's DeployedModel that served this prediction. model (str): - Output only. The name of the Model this - DeployedModel, that served this prediction, was - created from. + Output only. The resource name of the Model + which is deployed as the DeployedModel that this + prediction hits. model_display_name (str): Output only. The [display name][google.cloud.aiplatform.v1beta1.Model.display_name] of - the Model this DeployedModel, that served this prediction, - was created from. 
+ the Model which is deployed as the DeployedModel that this + prediction hits. """ predictions = proto.RepeatedField( diff --git a/google/cloud/aiplatform_v1beta1/types/study.py b/google/cloud/aiplatform_v1beta1/types/study.py index 7c46137bee..77032803f9 100644 --- a/google/cloud/aiplatform_v1beta1/types/study.py +++ b/google/cloud/aiplatform_v1beta1/types/study.py @@ -27,7 +27,8 @@ class Study(proto.Message): - r"""A message representing a Study. + r"""LINT.IfChange + A message representing a Study. Attributes: name (str): @@ -97,13 +98,14 @@ class Trial(proto.Message): client_id (str): Output only. The identifier of the client that originally requested this Trial. Each client is identified by a unique - client_id. When a client asks for a suggestion, Vizier will - assign it a Trial. The client should evaluate the Trial, - complete it, and report back to Vizier. If suggestion is - asked again by same client_id before the Trial is completed, - the same Trial will be returned. Multiple clients with - different client_ids can ask for suggestions simultaneously, - each of them will get their own Trial. + client_id. When a client asks for a suggestion, Vertex AI + Vizier will assign it a Trial. The client should evaluate + the Trial, complete it, and report back to Vertex AI Vizier. + If suggestion is asked again by same client_id before the + Trial is completed, the same Trial will be returned. + Multiple clients with different client_ids can ask for + suggestions simultaneously, each of them will get their own + Trial. infeasible_reason (str): Output only. A human readable string describing why the Trial is infeasible. This is set only if Trial state is @@ -208,9 +210,9 @@ class StudySpec(proto.Message): The search algorithm specified for the Study. observation_noise (google.cloud.aiplatform_v1beta1.types.StudySpec.ObservationNoise): The observation noise level of the study. - Currently only supported by the Vizier service. 
- Not supported by HyperparamterTuningJob or - TrainingPipeline. + Currently only supported by the Vertex AI Vizier + service. Not supported by HyperparamterTuningJob + or TrainingPipeline. measurement_selection_type (google.cloud.aiplatform_v1beta1.types.StudySpec.MeasurementSelectionType): Describe which measurement selection type will be used @@ -335,8 +337,8 @@ class DoubleValueSpec(proto.Message): to be a relatively good starting point. Unset value signals that there is no offered starting point. - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. + Currently only supported by the Vertex AI Vizier service. + Not supported by HyperparamterTuningJob or TrainingPipeline. This field is a member of `oneof`_ ``_default_value``. """ @@ -360,8 +362,8 @@ class IntegerValueSpec(proto.Message): to be a relatively good starting point. Unset value signals that there is no offered starting point. - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. + Currently only supported by the Vertex AI Vizier service. + Not supported by HyperparamterTuningJob or TrainingPipeline. This field is a member of `oneof`_ ``_default_value``. """ diff --git a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py index 4ee0f8265c..6b9ee8c4dc 100644 --- a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py @@ -33,6 +33,7 @@ "FilterSplit", "PredefinedSplit", "TimestampSplit", + "StratifiedSplit", }, ) @@ -207,6 +208,12 @@ class InputDataConfig(proto.Message): Split based on the timestamp of the input data pieces. + This field is a member of `oneof`_ ``split``. + stratified_split (google.cloud.aiplatform_v1beta1.types.StratifiedSplit): + Supported only for tabular Datasets. + Split based on the distribution of the specified + column. 
+ This field is a member of `oneof`_ ``split``. gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): The Cloud Storage location where the training data is to be @@ -323,6 +330,9 @@ class InputDataConfig(proto.Message): timestamp_split = proto.Field( proto.MESSAGE, number=5, oneof="split", message="TimestampSplit", ) + stratified_split = proto.Field( + proto.MESSAGE, number=12, oneof="split", message="StratifiedSplit", + ) gcs_destination = proto.Field( proto.MESSAGE, number=8, oneof="destination", message=io.GcsDestination, ) @@ -457,4 +467,45 @@ class TimestampSplit(proto.Message): key = proto.Field(proto.STRING, number=4,) +class StratifiedSplit(proto.Message): + r"""Assigns input data to the training, validation, and test sets so + that the distribution of values found in the categorical column (as + specified by the ``key`` field) is mirrored within each split. The + fraction values determine the relative sizes of the splits. + + For example, if the specified column has three values, with 50% of + the rows having value "A", 25% value "B", and 25% value "C", and the + split fractions are specified as 80/10/10, then the training set + will constitute 80% of the training data, with about 50% of the + training set rows having the value "A" for the specified column, + about 25% having the value "B", and about 25% having the value "C". + + Only the top 500 occurring values are used; any values not in the + top 500 values are randomly assigned to a split. If less than three + rows contain a specific value, those rows are randomly assigned. + + Supported only for tabular Datasets. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + key (str): + Required. 
The key is a name of one of the + Dataset's data columns. The key provided must be + for a categorical column. + """ + + training_fraction = proto.Field(proto.DOUBLE, number=1,) + validation_fraction = proto.Field(proto.DOUBLE, number=2,) + test_fraction = proto.Field(proto.DOUBLE, number=3,) + key = proto.Field(proto.STRING, number=4,) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py b/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py new file mode 100644 index 0000000000..42ecf09fae --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", manifest={"UnmanagedContainerModel",}, +) + + +class UnmanagedContainerModel(proto.Message): + r"""Contains model information necessary to perform batch + prediction without requiring a full model import. + + Attributes: + artifact_uri (str): + The path to the directory containing the + Model artifact and any of its supporting files. 
+ predict_schemata (google.cloud.aiplatform_v1beta1.types.PredictSchemata): + Contains the schemata used in Model's + predictions and explanations + container_spec (google.cloud.aiplatform_v1beta1.types.ModelContainerSpec): + Input only. The specification of the + container that is to be used when deploying this + Model. + """ + + artifact_uri = proto.Field(proto.STRING, number=1,) + predict_schemata = proto.Field( + proto.MESSAGE, number=2, message=model.PredictSchemata, + ) + container_spec = proto.Field( + proto.MESSAGE, number=3, message=model.ModelContainerSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index 3b1821e8d7..bdc0284aa3 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -1624,20 +1624,18 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1647,9 +1645,9 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - location = "clam" - dataset = "whelk" + project = "scallop" + location = "abalone" + dataset = "squid" expected = 
"projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @@ -1659,9 +1657,9 @@ def test_dataset_path(): def test_parse_dataset_path(): expected = { - "project": "octopus", - "location": "oyster", - "dataset": "nudibranch", + "project": "clam", + "location": "whelk", + "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1671,18 +1669,20 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + project = "oyster" + location = "nudibranch" + dataset = "cuttlefish" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", + "project": "mussel", + "location": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py index c069fb4b6a..7c59752cf7 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py @@ -649,7 +649,9 @@ def test_create_endpoint_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.create_endpoint( - parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + parent="parent_value", + endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) # Establish that the underlying call was made with the expected @@ -662,6 +664,9 @@ def test_create_endpoint_flattened(): arg = args[0].endpoint mock_val = gca_endpoint.Endpoint(name="name_value") assert arg == mock_val + arg = args[0].endpoint_id + mock_val = "endpoint_id_value" + assert arg == mock_val def test_create_endpoint_flattened_error(): @@ -674,6 +679,7 @@ def test_create_endpoint_flattened_error(): endpoint_service.CreateEndpointRequest(), parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) @@ -694,7 +700,9 @@ async def test_create_endpoint_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_endpoint( - parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + parent="parent_value", + endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) # Establish that the underlying call was made with the expected @@ -707,6 +715,9 @@ async def test_create_endpoint_flattened_async(): arg = args[0].endpoint mock_val = gca_endpoint.Endpoint(name="name_value") assert arg == mock_val + arg = args[0].endpoint_id + mock_val = "endpoint_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -722,6 +733,7 @@ async def test_create_endpoint_flattened_error_async(): endpoint_service.CreateEndpointRequest(), parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) @@ -745,6 +757,7 @@ def test_get_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) 
response = client.get_endpoint(request) @@ -761,6 +774,7 @@ def test_get_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -808,6 +822,7 @@ async def test_get_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) ) @@ -825,6 +840,7 @@ async def test_get_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -1337,6 +1353,7 @@ def test_update_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) response = client.update_endpoint(request) @@ -1353,6 +1370,7 @@ def test_update_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -1400,6 +1418,7 @@ async def test_update_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) ) @@ -1417,6 +1436,7 @@ async def test_update_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == 
"network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py index b91a29b022..392021dcb0 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -684,6 +684,7 @@ def test_create_featurestore_flattened(): client.create_featurestore( parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) # Establish that the underlying call was made with the expected @@ -696,6 +697,9 @@ def test_create_featurestore_flattened(): arg = args[0].featurestore mock_val = gca_featurestore.Featurestore(name="name_value") assert arg == mock_val + arg = args[0].featurestore_id + mock_val = "featurestore_id_value" + assert arg == mock_val def test_create_featurestore_flattened_error(): @@ -710,6 +714,7 @@ def test_create_featurestore_flattened_error(): featurestore_service.CreateFeaturestoreRequest(), parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) @@ -734,6 +739,7 @@ async def test_create_featurestore_flattened_async(): response = await client.create_featurestore( parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) # Establish that the underlying call was made with the expected @@ -746,6 +752,9 @@ async def test_create_featurestore_flattened_async(): arg = args[0].featurestore mock_val = gca_featurestore.Featurestore(name="name_value") assert arg == mock_val + arg = args[0].featurestore_id + mock_val = "featurestore_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -761,6 +770,7 @@ async def 
test_create_featurestore_flattened_error_async(): featurestore_service.CreateFeaturestoreRequest(), parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) @@ -2035,6 +2045,7 @@ def test_create_entity_type_flattened(): client.create_entity_type( parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) # Establish that the underlying call was made with the expected @@ -2047,6 +2058,9 @@ def test_create_entity_type_flattened(): arg = args[0].entity_type mock_val = gca_entity_type.EntityType(name="name_value") assert arg == mock_val + arg = args[0].entity_type_id + mock_val = "entity_type_id_value" + assert arg == mock_val def test_create_entity_type_flattened_error(): @@ -2061,6 +2075,7 @@ def test_create_entity_type_flattened_error(): featurestore_service.CreateEntityTypeRequest(), parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) @@ -2085,6 +2100,7 @@ async def test_create_entity_type_flattened_async(): response = await client.create_entity_type( parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) # Establish that the underlying call was made with the expected @@ -2097,6 +2113,9 @@ async def test_create_entity_type_flattened_async(): arg = args[0].entity_type mock_val = gca_entity_type.EntityType(name="name_value") assert arg == mock_val + arg = args[0].entity_type_id + mock_val = "entity_type_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -2112,6 +2131,7 @@ async def test_create_entity_type_flattened_error_async(): featurestore_service.CreateEntityTypeRequest(), parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) @@ -3364,7 +3384,9 @@ def test_create_feature_flattened(): # Call the method with a 
truthy value for each flattened field, # using the keyword arguments to the method. client.create_feature( - parent="parent_value", feature=gca_feature.Feature(name="name_value"), + parent="parent_value", + feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) # Establish that the underlying call was made with the expected @@ -3377,6 +3399,9 @@ def test_create_feature_flattened(): arg = args[0].feature mock_val = gca_feature.Feature(name="name_value") assert arg == mock_val + arg = args[0].feature_id + mock_val = "feature_id_value" + assert arg == mock_val def test_create_feature_flattened_error(): @@ -3391,6 +3416,7 @@ def test_create_feature_flattened_error(): featurestore_service.CreateFeatureRequest(), parent="parent_value", feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) @@ -3411,7 +3437,9 @@ async def test_create_feature_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
response = await client.create_feature( - parent="parent_value", feature=gca_feature.Feature(name="name_value"), + parent="parent_value", + feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) # Establish that the underlying call was made with the expected @@ -3424,6 +3452,9 @@ async def test_create_feature_flattened_async(): arg = args[0].feature mock_val = gca_feature.Feature(name="name_value") assert arg == mock_val + arg = args[0].feature_id + mock_val = "feature_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -3439,6 +3470,7 @@ async def test_create_feature_flattened_error_async(): featurestore_service.CreateFeatureRequest(), parent="parent_value", feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py index 052d411dff..9e96140fe4 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py @@ -778,6 +778,7 @@ def test_get_index_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) response = client.get_index_endpoint(request) @@ -793,6 +794,7 @@ def test_get_index_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True def test_get_index_endpoint_from_dict(): @@ -841,6 +843,7 @@ async def test_get_index_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) ) response = await client.get_index_endpoint(request) @@ -857,6 +860,7 @@ async def test_get_index_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert 
response.network == "network_value" + assert response.enable_private_service_connect is True @pytest.mark.asyncio @@ -1436,6 +1440,7 @@ def test_update_index_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) response = client.update_index_endpoint(request) @@ -1451,6 +1456,7 @@ def test_update_index_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True def test_update_index_endpoint_from_dict(): @@ -1499,6 +1505,7 @@ async def test_update_index_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) ) response = await client.update_index_endpoint(request) @@ -1515,6 +1522,7 @@ async def test_update_index_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True @pytest.mark.asyncio @@ -2371,6 +2379,252 @@ async def test_undeploy_index_flattened_error_async(): ) +def test_mutate_deployed_index( + transport: str = "grpc", + request_type=index_endpoint_service.MutateDeployedIndexRequest, +): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_mutate_deployed_index_from_dict(): + test_mutate_deployed_index(request_type=dict) + + +def test_mutate_deployed_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + client.mutate_deployed_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_async( + transport: str = "grpc_asyncio", + request_type=index_endpoint_service.MutateDeployedIndexRequest, +): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_async_from_dict(): + await test_mutate_deployed_index_async(request_type=dict) + + +def test_mutate_deployed_index_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.MutateDeployedIndexRequest() + + request.index_endpoint = "index_endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index_endpoint=index_endpoint/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_endpoint_service.MutateDeployedIndexRequest() + + request.index_endpoint = "index_endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index_endpoint=index_endpoint/value",) in kw[ + "metadata" + ] + + +def test_mutate_deployed_index_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_deployed_index( + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = "index_endpoint_value" + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id="id_value") + assert arg == mock_val + + +def test_mutate_deployed_index_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_deployed_index( + index_endpoint_service.MutateDeployedIndexRequest(), + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.mutate_deployed_index( + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = "index_endpoint_value" + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id="id_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.mutate_deployed_index( + index_endpoint_service.MutateDeployedIndexRequest(), + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.IndexEndpointServiceGrpcTransport( @@ -2477,6 +2731,7 @@ def test_index_endpoint_service_base_transport(): "delete_index_endpoint", "deploy_index", "undeploy_index", + "mutate_deployed_index", ) for method in methods: with pytest.raises(NotImplementedError): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py index 9ed6e4f841..4538fb5fbe 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -51,6 +51,7 @@ data_labeling_job as gca_data_labeling_job, ) from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import env_var from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import explanation_metadata from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job @@ -62,6 +63,7 @@ from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types 
import machine_resources from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, @@ -69,6 +71,7 @@ from google.cloud.aiplatform_v1beta1.types import model_monitoring from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore