From f3a3d03c8509dc49c24139155a572dacbe954f66 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 14 Jul 2021 19:30:26 +0000 Subject: [PATCH] feat: Removes AcceleratorType.TPU_V2 and TPU_V3 constants (#543) PiperOrigin-RevId: 384714314 Source-Link: https://github.com/googleapis/googleapis/commit/cc51e4818f5f928f34e801252cc11db73e097933 Source-Link: https://github.com/googleapis/googleapis-gen/commit/f87b91c30a523fee5d51a206bc6ff385ff551c18 feat: Adds AcceleratorType.NVIDIA_TESLA_A100 constant feat: Adds BigQuery output table field to batch prediction job output config feat: Adds JobState.JOB_STATE_EXPIRED constant feat: Adds AutoscalingMetricSpec message feat: Adds PipelineService methods for Create, Get, List, Delete, Cancel feat: Adds fields to Study message --- google/cloud/aiplatform_v1/__init__.py | 30 + .../cloud/aiplatform_v1/gapic_metadata.json | 50 + .../services/job_service/async_client.py | 2 + .../services/job_service/client.py | 15 + .../services/pipeline_service/async_client.py | 432 +++++ .../services/pipeline_service/client.py | 528 ++++++ .../services/pipeline_service/pagers.py | 129 ++ .../pipeline_service/transports/base.py | 65 + .../pipeline_service/transports/grpc.py | 152 ++ .../transports/grpc_asyncio.py | 157 ++ .../prediction_service/async_client.py | 26 +- .../services/prediction_service/client.py | 30 +- google/cloud/aiplatform_v1/types/__init__.py | 32 + .../aiplatform_v1/types/accelerator_type.py | 3 +- google/cloud/aiplatform_v1/types/artifact.py | 88 + .../types/batch_prediction_job.py | 12 +- google/cloud/aiplatform_v1/types/context.py | 74 + google/cloud/aiplatform_v1/types/execution.py | 87 + google/cloud/aiplatform_v1/types/io.py | 1 + google/cloud/aiplatform_v1/types/job_state.py | 1 + .../aiplatform_v1/types/machine_resources.py | 74 +- .../cloud/aiplatform_v1/types/pipeline_job.py | 311 ++++ .../aiplatform_v1/types/pipeline_service.py | 179 +- google/cloud/aiplatform_v1/types/study.py | 66 + google/cloud/aiplatform_v1/types/value.py | 38 + .../gapic/aiplatform_v1/test_job_service.py | 62 +- .../aiplatform_v1/test_pipeline_service.py | 1512 ++++++++++++++++- .../aiplatform_v1/test_prediction_service.py | 4 +- 28 files changed, 4052 insertions(+), 108 deletions(-) create mode 100644 google/cloud/aiplatform_v1/types/artifact.py create mode 100644 google/cloud/aiplatform_v1/types/context.py create mode 100644 google/cloud/aiplatform_v1/types/execution.py create mode 100644 google/cloud/aiplatform_v1/types/pipeline_job.py create mode 100644 google/cloud/aiplatform_v1/types/value.py diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index d765cc599d..f5207bb46d 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -34,8 +34,10 @@ from .types.accelerator_type import AcceleratorType from .types.annotation import Annotation from .types.annotation_spec import AnnotationSpec +from .types.artifact import Artifact from .types.batch_prediction_job import BatchPredictionJob from .types.completion_stats import CompletionStats +from .types.context import Context from .types.custom_job import ContainerSpec from .types.custom_job import CustomJob from .types.custom_job import CustomJobSpec @@ -86,6 +88,7 @@ from .types.endpoint_service import UndeployModelResponse from .types.endpoint_service import UpdateEndpointRequest from .types.env_var import EnvVar +from .types.execution import Execution from 
.types.hyperparameter_tuning_job import HyperparameterTuningJob from .types.io import BigQueryDestination from .types.io import BigQuerySource @@ -118,6 +121,7 @@ from .types.job_service import ListHyperparameterTuningJobsResponse from .types.job_state import JobState from .types.machine_resources import AutomaticResources +from .types.machine_resources import AutoscalingMetricSpec from .types.machine_resources import BatchDedicatedResources from .types.machine_resources import DedicatedResources from .types.machine_resources import DiskSpec @@ -157,10 +161,20 @@ from .types.model_service import UploadModelResponse from .types.operation import DeleteOperationMetadata from .types.operation import GenericOperationMetadata +from .types.pipeline_job import PipelineJob +from .types.pipeline_job import PipelineJobDetail +from .types.pipeline_job import PipelineTaskDetail +from .types.pipeline_job import PipelineTaskExecutorDetail +from .types.pipeline_service import CancelPipelineJobRequest from .types.pipeline_service import CancelTrainingPipelineRequest +from .types.pipeline_service import CreatePipelineJobRequest from .types.pipeline_service import CreateTrainingPipelineRequest +from .types.pipeline_service import DeletePipelineJobRequest from .types.pipeline_service import DeleteTrainingPipelineRequest +from .types.pipeline_service import GetPipelineJobRequest from .types.pipeline_service import GetTrainingPipelineRequest +from .types.pipeline_service import ListPipelineJobsRequest +from .types.pipeline_service import ListPipelineJobsResponse from .types.pipeline_service import ListTrainingPipelinesRequest from .types.pipeline_service import ListTrainingPipelinesResponse from .types.pipeline_state import PipelineState @@ -185,6 +199,7 @@ from .types.training_pipeline import TimestampSplit from .types.training_pipeline import TrainingPipeline from .types.user_action_reference import UserActionReference +from .types.value import Value __all__ = ( "DatasetServiceAsyncClient", @@ -199,7 +214,9 @@ "ActiveLearningConfig", "Annotation", "AnnotationSpec", + "Artifact", "AutomaticResources", + "AutoscalingMetricSpec", "BatchDedicatedResources", "BatchMigrateResourcesOperationMetadata", "BatchMigrateResourcesRequest", @@ -211,10 +228,12 @@ "CancelCustomJobRequest", "CancelDataLabelingJobRequest", "CancelHyperparameterTuningJobRequest", + "CancelPipelineJobRequest", "CancelTrainingPipelineRequest", "CompletionStats", "ContainerRegistryDestination", "ContainerSpec", + "Context", "CreateBatchPredictionJobRequest", "CreateCustomJobRequest", "CreateDataLabelingJobRequest", @@ -223,6 +242,7 @@ "CreateEndpointOperationMetadata", "CreateEndpointRequest", "CreateHyperparameterTuningJobRequest", + "CreatePipelineJobRequest", "CreateSpecialistPoolOperationMetadata", "CreateSpecialistPoolRequest", "CreateTrainingPipelineRequest", @@ -241,6 +261,7 @@ "DeleteHyperparameterTuningJobRequest", "DeleteModelRequest", "DeleteOperationMetadata", + "DeletePipelineJobRequest", "DeleteSpecialistPoolRequest", "DeleteTrainingPipelineRequest", "DeployModelOperationMetadata", @@ -253,6 +274,7 @@ "Endpoint", "EndpointServiceClient", "EnvVar", + "Execution", "ExportDataConfig", "ExportDataOperationMetadata", "ExportDataRequest", @@ -275,6 +297,7 @@ "GetModelEvaluationRequest", "GetModelEvaluationSliceRequest", "GetModelRequest", + "GetPipelineJobRequest", "GetSpecialistPoolRequest", "GetTrainingPipelineRequest", "HyperparameterTuningJob", @@ -307,6 +330,8 @@ "ListModelEvaluationsResponse", "ListModelsRequest", 
"ListModelsResponse", + "ListPipelineJobsRequest", + "ListPipelineJobsResponse", "ListSpecialistPoolsRequest", "ListSpecialistPoolsResponse", "ListTrainingPipelinesRequest", @@ -323,8 +348,12 @@ "ModelEvaluation", "ModelEvaluationSlice", "ModelServiceClient", + "PipelineJob", + "PipelineJobDetail", "PipelineServiceClient", "PipelineState", + "PipelineTaskDetail", + "PipelineTaskExecutorDetail", "Port", "PredefinedSplit", "PredictRequest", @@ -356,5 +385,6 @@ "UploadModelRequest", "UploadModelResponse", "UserActionReference", + "Value", "WorkerPoolSpec", ) diff --git a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index 0abed0fd70..4b22c95676 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -570,26 +570,51 @@ "grpc": { "libraryClient": "PipelineServiceClient", "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, "CancelTrainingPipeline": { "methods": [ "cancel_training_pipeline" ] }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, "CreateTrainingPipeline": { "methods": [ "create_training_pipeline" ] }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, "DeleteTrainingPipeline": { "methods": [ "delete_training_pipeline" ] }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, "GetTrainingPipeline": { "methods": [ "get_training_pipeline" ] }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, "ListTrainingPipelines": { "methods": [ "list_training_pipelines" @@ -600,26 +625,51 @@ "grpc-async": { "libraryClient": "PipelineServiceAsyncClient", "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, "CancelTrainingPipeline": { "methods": [ "cancel_training_pipeline" ] }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, "CreateTrainingPipeline": { "methods": [ "create_training_pipeline" ] }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, "DeleteTrainingPipeline": { "methods": [ "delete_training_pipeline" ] }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, "GetTrainingPipeline": { "methods": [ "get_training_pipeline" ] }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, "ListTrainingPipelines": { "methods": [ "list_training_pipelines" diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index c3a2e84cd1..1af807d346 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -87,6 +87,8 @@ class JobServiceAsyncClient: ) model_path = staticmethod(JobServiceClient.model_path) parse_model_path = staticmethod(JobServiceClient.parse_model_path) + network_path = staticmethod(JobServiceClient.network_path) + parse_network_path = staticmethod(JobServiceClient.parse_network_path) trial_path = staticmethod(JobServiceClient.trial_path) parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) common_billing_account_path = staticmethod( diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index 687a832212..a6273cd93e 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -285,6 +285,21 @@ def parse_model_path(path: str) -> 
Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_path(project: str, network: str,) -> str: + """Returns a fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str, str]: + """Parses a network path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path + ) + return m.groupdict() if m else {} + @staticmethod def trial_path(project: str, location: str, study: str, trial: str,) -> str: """Returns a fully-qualified trial string.""" diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index 5da3ad8022..5608b0bbd6 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -32,6 +32,8 @@ from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import pipeline_state from google.cloud.aiplatform_v1.types import training_pipeline @@ -57,10 +59,24 @@ class PipelineServiceAsyncClient: DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT + artifact_path = staticmethod(PipelineServiceClient.artifact_path) + parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) + context_path = staticmethod(PipelineServiceClient.context_path) + parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) + custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) + execution_path = staticmethod(PipelineServiceClient.execution_path) + parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) model_path = staticmethod(PipelineServiceClient.model_path) parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) + network_path = staticmethod(PipelineServiceClient.network_path) + parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) + pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) + parse_pipeline_job_path = staticmethod( + PipelineServiceClient.parse_pipeline_job_path + ) training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) parse_training_pipeline_path = staticmethod( PipelineServiceClient.parse_training_pipeline_path @@ -597,6 +613,422 @@ async def cancel_training_pipeline( request, retry=retry, timeout=timeout, metadata=metadata, ) + async def create_pipeline_job( + self, + request: pipeline_service.CreatePipelineJobRequest = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) ->
gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreatePipelineJobRequest`): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (:class:`google.cloud.aiplatform_v1.types.PipelineJob`): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (:class:`str`): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.CreatePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_pipeline_job( + self, + request: pipeline_service.GetPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetPipelineJobRequest`): + The request object. 
Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.GetPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_pipeline_jobs( + self, + request: pipeline_service.ListPipelineJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsAsyncPager: + r"""Lists PipelineJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListPipelineJobsRequest`): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.ListPipelineJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_pipeline_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListPipelineJobsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_pipeline_job( + self, + request: pipeline_service.DeletePipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a PipelineJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeletePipelineJobRequest`): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = pipeline_service.DeletePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_pipeline_job( + self, + request: pipeline_service.CancelPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CancelPipelineJobRequest`): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.CancelPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py index 9ea5595638..3266a00e97 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -36,6 +36,8 @@ from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import pipeline_state from google.cloud.aiplatform_v1.types import training_pipeline @@ -171,6 +173,64 @@ def transport(self) -> PipelineServiceTransport: """ return self._transport + @staticmethod + def artifact_path( + project: str, location: str, metadata_store: str, artifact: str, + ) -> str: + """Returns a fully-qualified artifact string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( + project=project, + location=location, + metadata_store=metadata_store, + artifact=artifact, + ) + + @staticmethod + def parse_artifact_path(path: str) -> Dict[str, str]: + """Parses a artifact path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/artifacts/(?P<artifact>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def context_path( + project: str, location: str, metadata_store: str, context: str, + ) -> str: + """Returns a fully-qualified context string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + + @staticmethod + def parse_context_path(path: str) -> Dict[str, str]: + """Parses a context path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/contexts/(?P<context>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def custom_job_path(project: str, location: str, custom_job: str,) -> str: + """Returns a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format( + project=project, location=location, custom_job=custom_job, + ) + + @staticmethod + def parse_custom_job_path(path: str) -> Dict[str, str]: + """Parses a custom_job path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: """Returns a fully-qualified endpoint string.""" @@ -187,6 +247,27 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {}
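
The path helpers added here follow the usual GAPIC pattern: a builder that interpolates resource IDs into the canonical resource name, and a parser whose named regex groups invert it via m.groupdict(). A minimal round-trip sketch, using purely hypothetical IDs:

    from google.cloud.aiplatform_v1 import PipelineServiceClient

    # Build the canonical resource name from its components.
    path = PipelineServiceClient.artifact_path(
        project="my-project",  # hypothetical values, for illustration only
        location="us-central1",
        metadata_store="default",
        artifact="my-artifact",
    )
    # Parse it back into the named segments captured by the regex.
    assert PipelineServiceClient.parse_artifact_path(path) == {
        "project": "my-project",
        "location": "us-central1",
        "metadata_store": "default",
        "artifact": "my-artifact",
    }
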
+ @staticmethod + def execution_path( + project: str, location: str, metadata_store: str, execution: str, + ) -> str: + """Returns a fully-qualified execution string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( + project=project, + location=location, + metadata_store=metadata_store, + execution=execution, + ) + + @staticmethod + def parse_execution_path(path: str) -> Dict[str, str]: + """Parses a execution path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/executions/(?P<execution>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def model_path(project: str, location: str, model: str,) -> str: """Returns a fully-qualified model string.""" @@ -203,6 +284,37 @@ def parse_model_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_path(project: str, network: str,) -> str: + """Returns a fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str, str]: + """Parses a network path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path + ) + return m.groupdict() if m else {} + + @staticmethod + def pipeline_job_path(project: str, location: str, pipeline_job: str,) -> str: + """Returns a fully-qualified pipeline_job string.""" + return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format( + project=project, location=location, pipeline_job=pipeline_job, + ) + + @staticmethod + def parse_pipeline_job_path(path: str) -> Dict[str, str]: + """Parses a pipeline_job path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/pipelineJobs/(?P<pipeline_job>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def training_pipeline_path( project: str, location: str, training_pipeline: str, @@ -810,6 +922,422 @@ def cancel_training_pipeline( request, retry=retry, timeout=timeout, metadata=metadata, ) + def create_pipeline_job( + self, + request: pipeline_service.CreatePipelineJobRequest = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Args: + request (google.cloud.aiplatform_v1.types.CreatePipelineJobRequest): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + parent (str): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated.
+ + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreatePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CreatePipelineJobRequest): + request = pipeline_service.CreatePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_pipeline_job( + self, + request: pipeline_service.GetPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + Args: + request (google.cloud.aiplatform_v1.types.GetPipelineJobRequest): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetPipelineJobRequest): + request = pipeline_service.GetPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_pipeline_jobs( + self, + request: pipeline_service.ListPipelineJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsPager: + r"""Lists PipelineJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListPipelineJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListPipelineJobsRequest): + request = pipeline_service.ListPipelineJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPipelineJobsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_pipeline_job( + self, + request: pipeline_service.DeletePipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a PipelineJob. + + Args: + request (google.cloud.aiplatform_v1.types.DeletePipelineJobRequest): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + name (str): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.DeletePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.DeletePipelineJobRequest): + request = pipeline_service.DeletePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_pipeline_job( + self, + request: pipeline_service.CancelPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Args: + request (google.cloud.aiplatform_v1.types.CancelPipelineJobRequest): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelPipelineJobRequest): + request = pipeline_service.CancelPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py index 280d676b4b..1dd7ce291c 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py @@ -24,6 +24,7 @@ Optional, ) +from google.cloud.aiplatform_v1.types import pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline @@ -158,3 +159,131 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListPipelineJobsPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., pipeline_service.ListPipelineJobsResponse], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[pipeline_job.PipelineJob]: + for page in self.pages: + yield from page.pipeline_jobs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListPipelineJobsAsyncPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[pipeline_job.PipelineJob]: + async def async_generator(): + async for page in self.pages: + for response in page.pipeline_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py index 58b449654d..04cef84b9d 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py @@ -27,6 +27,8 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline @@ -184,6 +186,21 @@ def _prep_wrapped_messages(self, client_info): default_timeout=5.0, client_info=client_info, ), + self.create_pipeline_job: gapic_v1.method.wrap_method( + self.create_pipeline_job, default_timeout=None, client_info=client_info, + ), + self.get_pipeline_job: gapic_v1.method.wrap_method( + self.get_pipeline_job, default_timeout=None, client_info=client_info, + ), + self.list_pipeline_jobs: gapic_v1.method.wrap_method( + self.list_pipeline_jobs, 
default_timeout=None, client_info=client_info, + ), + self.delete_pipeline_job: gapic_v1.method.wrap_method( + self.delete_pipeline_job, default_timeout=None, client_info=client_info, + ), + self.cancel_pipeline_job: gapic_v1.method.wrap_method( + self.cancel_pipeline_job, default_timeout=None, client_info=client_info, + ), } @property @@ -245,5 +262,53 @@ def cancel_training_pipeline( ]: raise NotImplementedError() + @property + def create_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Union[gca_pipeline_job.PipelineJob, Awaitable[gca_pipeline_job.PipelineJob]], + ]: + raise NotImplementedError() + + @property + def get_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + Union[pipeline_job.PipelineJob, Awaitable[pipeline_job.PipelineJob]], + ]: + raise NotImplementedError() + + @property + def list_pipeline_jobs( + self, + ) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Union[ + pipeline_service.ListPipelineJobsResponse, + Awaitable[pipeline_service.ListPipelineJobsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + __all__ = ("PipelineServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py index 8018cf1e3d..71adcdd6ca 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -25,6 +25,8 @@ import grpc # type: ignore +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline @@ -402,5 +404,155 @@ def cancel_training_pipeline( ) return self._stubs["cancel_training_pipeline"] + @property + def create_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], gca_pipeline_job.PipelineJob + ]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_pipeline_job" not in self._stubs: + self._stubs["create_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob", + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["create_pipeline_job"] + + @property + def get_pipeline_job( + self, + ) -> Callable[[pipeline_service.GetPipelineJobRequest], pipeline_job.PipelineJob]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_pipeline_job" not in self._stubs: + self._stubs["get_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob", + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["get_pipeline_job"] + + @property + def list_pipeline_jobs( + self, + ) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + pipeline_service.ListPipelineJobsResponse, + ]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + ~.ListPipelineJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_pipeline_jobs" not in self._stubs: + self._stubs["list_pipeline_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs", + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs["list_pipeline_jobs"] + + @property + def delete_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_pipeline_job" not in self._stubs: + self._stubs["delete_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob", + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_pipeline_job"] + + @property + def cancel_pipeline_job( + self, + ) -> Callable[[pipeline_service.CancelPipelineJobRequest], empty_pb2.Empty]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. 
The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_pipeline_job" not in self._stubs: + self._stubs["cancel_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob", + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["cancel_pipeline_job"] + __all__ = ("PipelineServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py index de09eb535f..a7712b19dd 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -26,6 +26,8 @@ import grpc # type: ignore from grpc.experimental import aio # type: ignore +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline @@ -410,5 +412,160 @@ def cancel_training_pipeline( ) return self._stubs["cancel_training_pipeline"] + @property + def create_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Awaitable[gca_pipeline_job.PipelineJob], + ]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
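# A hedged sketch of driving the new CreatePipelineJob RPC through the async
# client surface (resource names and the job payload are stand-ins; the
# flattened ``parent``/``pipeline_job`` keywords mirror the fields of
# CreatePipelineJobRequest):
#
#     from google.cloud import aiplatform_v1
#
#     async def create_job():
#         client = aiplatform_v1.PipelineServiceAsyncClient()
#         job = aiplatform_v1.PipelineJob(
#             display_name="demo-pipeline",
#             runtime_config=aiplatform_v1.PipelineJob.RuntimeConfig(
#                 gcs_output_directory="gs://my-bucket/pipeline-root",
#             ),
#         )
#         created = await client.create_pipeline_job(
#             parent="projects/my-project/locations/us-central1",
#             pipeline_job=job,
#         )
#         # The returned job begins running immediately on creation.
#         print(created.name, created.state)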
+ if "create_pipeline_job" not in self._stubs: + self._stubs["create_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob", + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["create_pipeline_job"] + + @property + def get_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.GetPipelineJobRequest], Awaitable[pipeline_job.PipelineJob] + ]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_pipeline_job" not in self._stubs: + self._stubs["get_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob", + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["get_pipeline_job"] + + @property + def list_pipeline_jobs( + self, + ) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Awaitable[pipeline_service.ListPipelineJobsResponse], + ]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + Awaitable[~.ListPipelineJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_pipeline_jobs" not in self._stubs: + self._stubs["list_pipeline_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs", + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs["list_pipeline_jobs"] + + @property + def delete_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_pipeline_job" not in self._stubs: + self._stubs["delete_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob", + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_pipeline_job"] + + @property + def cancel_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the cancel pipeline job method over gRPC. 
+ + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_pipeline_job" not in self._stubs: + self._stubs["cancel_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob", + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["cancel_pipeline_job"] + __all__ = ("PipelineServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index fe69ba8c58..b5cde52017 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -166,8 +166,8 @@ async def predict( request: prediction_service.PredictRequest = None, *, endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, parameters: struct_pb2.Value = None, + instances: Sequence[struct_pb2.Value] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -186,6 +186,17 @@ async def predict( This corresponds to the ``endpoint`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + parameters (:class:`google.protobuf.struct_pb2.Value`): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): Required. The instances that are the input to the prediction call. A DeployedModel may have an upper limit @@ -202,17 +213,6 @@ async def predict( This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - parameters (:class:`google.protobuf.struct_pb2.Value`): - The parameters that govern the prediction. 
The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -228,7 +228,7 @@ async def predict( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters]) + has_flattened_params = any([endpoint, parameters, instances]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py index 6caef31722..d1a1198435 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -353,8 +353,8 @@ def predict( request: prediction_service.PredictRequest = None, *, endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, parameters: struct_pb2.Value = None, + instances: Sequence[struct_pb2.Value] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -373,6 +373,17 @@ def predict( This corresponds to the ``endpoint`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. instances (Sequence[google.protobuf.struct_pb2.Value]): Required. The instances that are the input to the prediction call. A DeployedModel may have an upper limit @@ -389,17 +400,6 @@ def predict( This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -415,7 +415,7 @@ def predict( # Create or coerce a protobuf request object. 
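# Because the flattened fields sit after the bare ``*`` in the signature, they
# are keyword-only, so swapping ``instances`` and ``parameters`` in this hunk
# reorders documentation without breaking any caller. A hedged sketch
# (endpoint path and payload are placeholders):
#
#     from google.cloud import aiplatform_v1
#     from google.protobuf import struct_pb2
#
#     client = aiplatform_v1.PredictionServiceClient()
#     response = client.predict(
#         endpoint="projects/my-project/locations/us-central1/endpoints/123",
#         instances=[struct_pb2.Value(string_value="example instance")],
#         parameters=struct_pb2.Value(),
#     )
#     for prediction in response.predictions:
#         print(prediction)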
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters]) + has_flattened_params = any([endpoint, parameters, instances]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -432,10 +432,10 @@ def predict( # request, apply these. if endpoint is not None: request.endpoint = endpoint - if instances is not None: - request.instances.extend(instances) if parameters is not None: request.parameters = parameters + if instances is not None: + request.instances.extend(instances) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 42bab0a05e..6d03e0c47c 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -15,8 +15,10 @@ # from .annotation import Annotation from .annotation_spec import AnnotationSpec +from .artifact import Artifact from .batch_prediction_job import BatchPredictionJob from .completion_stats import CompletionStats +from .context import Context from .custom_job import ( ContainerSpec, CustomJob, @@ -79,6 +81,7 @@ UpdateEndpointRequest, ) from .env_var import EnvVar +from .execution import Execution from .hyperparameter_tuning_job import HyperparameterTuningJob from .io import ( BigQueryDestination, @@ -115,6 +118,7 @@ ) from .machine_resources import ( AutomaticResources, + AutoscalingMetricSpec, BatchDedicatedResources, DedicatedResources, DiskSpec, @@ -163,11 +167,23 @@ DeleteOperationMetadata, GenericOperationMetadata, ) +from .pipeline_job import ( + PipelineJob, + PipelineJobDetail, + PipelineTaskDetail, + PipelineTaskExecutorDetail, +) from .pipeline_service import ( + CancelPipelineJobRequest, CancelTrainingPipelineRequest, + CreatePipelineJobRequest, CreateTrainingPipelineRequest, + DeletePipelineJobRequest, DeleteTrainingPipelineRequest, + GetPipelineJobRequest, GetTrainingPipelineRequest, + ListPipelineJobsRequest, + ListPipelineJobsResponse, ListTrainingPipelinesRequest, ListTrainingPipelinesResponse, ) @@ -200,13 +216,16 @@ TrainingPipeline, ) from .user_action_reference import UserActionReference +from .value import Value __all__ = ( "AcceleratorType", "Annotation", "AnnotationSpec", + "Artifact", "BatchPredictionJob", "CompletionStats", + "Context", "ContainerSpec", "CustomJob", "CustomJobSpec", @@ -257,6 +276,7 @@ "UndeployModelResponse", "UpdateEndpointRequest", "EnvVar", + "Execution", "HyperparameterTuningJob", "BigQueryDestination", "BigQuerySource", @@ -289,6 +309,7 @@ "ListHyperparameterTuningJobsResponse", "JobState", "AutomaticResources", + "AutoscalingMetricSpec", "BatchDedicatedResources", "DedicatedResources", "DiskSpec", @@ -328,10 +349,20 @@ "UploadModelResponse", "DeleteOperationMetadata", "GenericOperationMetadata", + "PipelineJob", + "PipelineJobDetail", + "PipelineTaskDetail", + "PipelineTaskExecutorDetail", + "CancelPipelineJobRequest", "CancelTrainingPipelineRequest", + "CreatePipelineJobRequest", "CreateTrainingPipelineRequest", + "DeletePipelineJobRequest", "DeleteTrainingPipelineRequest", + "GetPipelineJobRequest", "GetTrainingPipelineRequest", + "ListPipelineJobsRequest", + "ListPipelineJobsResponse", "ListTrainingPipelinesRequest", "ListTrainingPipelinesResponse", "PipelineState", @@ -356,4 +387,5 @@ "TimestampSplit", "TrainingPipeline", 
"UserActionReference", + "Value", ) diff --git a/google/cloud/aiplatform_v1/types/accelerator_type.py b/google/cloud/aiplatform_v1/types/accelerator_type.py index 6728739a23..168fe2474f 100644 --- a/google/cloud/aiplatform_v1/types/accelerator_type.py +++ b/google/cloud/aiplatform_v1/types/accelerator_type.py @@ -29,8 +29,7 @@ class AcceleratorType(proto.Enum): NVIDIA_TESLA_V100 = 3 NVIDIA_TESLA_P4 = 4 NVIDIA_TESLA_T4 = 5 - TPU_V2 = 6 - TPU_V3 = 7 + NVIDIA_TESLA_A100 = 8 __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/artifact.py b/google/cloud/aiplatform_v1/types/artifact.py new file mode 100644 index 0000000000..68428d03e8 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/artifact.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", manifest={"Artifact",}, +) + + +class Artifact(proto.Message): + r"""Instance of a general artifact. + Attributes: + name (str): + Output only. The resource name of the + Artifact. + display_name (str): + User provided display name of the Artifact. + May be up to 128 Unicode characters. + uri (str): + The uniform resource identifier of the + artifact file. May be empty if there is no + actual artifact file. + etag (str): + An eTag used to perform consistent read- + odify-write updates. If not set, a blind + "overwrite" update happens. + labels (Sequence[google.cloud.aiplatform_v1.types.Artifact.LabelsEntry]): + The labels with user-defined metadata to + organize your Artifacts. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Artifact (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Artifact was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Artifact was + last updated. + state (google.cloud.aiplatform_v1.types.Artifact.State): + The state of this Artifact. This is a + property of the Artifact, and does not imply or + capture any ongoing process. This property is + managed by clients (such as Vertex Pipelines), + and the system does not prescribe or check the + validity of state transitions. 
+ """ + + class State(proto.Enum): + r"""Describes the state of the Artifact.""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + LIVE = 2 + + name = proto.Field(proto.STRING, number=1,) + display_name = proto.Field(proto.STRING, number=2,) + uri = proto.Field(proto.STRING, number=6,) + etag = proto.Field(proto.STRING, number=9,) + labels = proto.MapField(proto.STRING, proto.STRING, number=10,) + create_time = proto.Field( + proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp, + ) + state = proto.Field(proto.ENUM, number=13, enum=State,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index 32194179ab..8b3e8fe60c 100644 --- a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -212,7 +212,7 @@ class OutputConfig(proto.Message): files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional ``error`` field - which as value has ```google.rpc.Status`` `__ + which as value has [google.rpc.Status][google.rpc.Status] containing only ``code`` and ``message`` fields. bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): The BigQuery project or dataset location where the output is @@ -234,8 +234,8 @@ class OutputConfig(proto.Message): ``errors`` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has - ```google.rpc.Status`` `__ represented as a STRUCT, - and containing only ``code`` and ``message``. + [google.rpc.Status][google.rpc.Status] represented as a + STRUCT, and containing only ``code`` and ``message``. predictions_format (str): Required. The format in which Vertex AI gives the predictions, must be one of the @@ -267,6 +267,11 @@ class OutputInfo(proto.Message): Output only. The path of the BigQuery dataset created, in ``bq://projectId.bqDatasetId`` format, into which the prediction output is written. + bigquery_output_table (str): + Output only. The name of the BigQuery table created, in + ``predictions_`` format, into which the + prediction output is written. Can be used by UI to generate + the BigQuery output path, for example. """ gcs_output_directory = proto.Field( @@ -275,6 +280,7 @@ class OutputInfo(proto.Message): bigquery_output_dataset = proto.Field( proto.STRING, number=2, oneof="output_location", ) + bigquery_output_table = proto.Field(proto.STRING, number=4,) name = proto.Field(proto.STRING, number=1,) display_name = proto.Field(proto.STRING, number=2,) diff --git a/google/cloud/aiplatform_v1/types/context.py b/google/cloud/aiplatform_v1/types/context.py new file mode 100644 index 0000000000..fc06f9c002 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/context.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.aiplatform.v1", manifest={"Context",},
+)
+
+
+class Context(proto.Message):
+    r"""Instance of a general context.
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            Context.
+        display_name (str):
+            User provided display name of the Context.
+            May be up to 128 Unicode characters.
+        etag (str):
+            An eTag used to perform consistent read-
+            modify-write updates. If not set, a blind
+            "overwrite" update happens.
+        labels (Sequence[google.cloud.aiplatform_v1.types.Context.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize your Contexts.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed. No more than 64 user labels can be
+            associated with one Context (System labels are
+            excluded).
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Context was
+            created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Context was
+            last updated.
+        parent_contexts (Sequence[str]):
+            Output only. A list of resource names of Contexts that are
+            parents of this Context. A Context may have at most 10
+            parent_contexts.
+    """
+
+    name = proto.Field(proto.STRING, number=1,)
+    display_name = proto.Field(proto.STRING, number=2,)
+    etag = proto.Field(proto.STRING, number=8,)
+    labels = proto.MapField(proto.STRING, proto.STRING, number=9,)
+    create_time = proto.Field(
+        proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
+    )
+    parent_contexts = proto.RepeatedField(proto.STRING, number=12,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/execution.py b/google/cloud/aiplatform_v1/types/execution.py
new file mode 100644
index 0000000000..3178936646
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/execution.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.aiplatform.v1", manifest={"Execution",},
+)
+
+
+class Execution(proto.Message):
+    r"""Instance of a general execution.
+    Attributes:
+        name (str):
+            Output only.
+            The resource name of the Execution.
+        display_name (str):
+            User provided display name of the Execution.
+            May be up to 128 Unicode characters.
+        state (google.cloud.aiplatform_v1.types.Execution.State):
+            The state of this Execution. This is a
+            property of the Execution, and does not imply or
+            capture any ongoing process. This property is
+            managed by clients (such as Vertex Pipelines)
+            and the system does not prescribe or check the
+            validity of state transitions.
+        etag (str):
+            An eTag used to perform consistent read-
+            modify-write updates. If not set, a blind
+            "overwrite" update happens.
+        labels (Sequence[google.cloud.aiplatform_v1.types.Execution.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize your Executions.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed. No more than 64 user labels can be
+            associated with one Execution (System labels are
+            excluded).
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Execution
+            was created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Execution
+            was last updated.
+    """
+
+    class State(proto.Enum):
+        r"""Describes the state of the Execution."""
+        STATE_UNSPECIFIED = 0
+        NEW = 1
+        RUNNING = 2
+        COMPLETE = 3
+        FAILED = 4
+        CACHED = 5
+        CANCELLED = 6
+
+    name = proto.Field(proto.STRING, number=1,)
+    display_name = proto.Field(proto.STRING, number=2,)
+    state = proto.Field(proto.ENUM, number=6, enum=State,)
+    etag = proto.Field(proto.STRING, number=9,)
+    labels = proto.MapField(proto.STRING, proto.STRING, number=10,)
+    create_time = proto.Field(
+        proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/io.py b/google/cloud/aiplatform_v1/types/io.py
index d23f3d91db..3b99270f71 100644
--- a/google/cloud/aiplatform_v1/types/io.py
+++ b/google/cloud/aiplatform_v1/types/io.py
@@ -84,6 +84,7 @@ class BigQueryDestination(proto.Message):
             Accepted forms:
 
             -  BigQuery path. For example: ``bq://projectId`` or
+               ``bq://projectId.bqDatasetId`` or
                ``bq://projectId.bqDatasetId.bqTableId``.
     """
 
diff --git a/google/cloud/aiplatform_v1/types/job_state.py b/google/cloud/aiplatform_v1/types/job_state.py
index 59c0949844..f5c7ebba63 100644
--- a/google/cloud/aiplatform_v1/types/job_state.py
+++ b/google/cloud/aiplatform_v1/types/job_state.py
@@ -32,6 +32,7 @@ class JobState(proto.Enum):
     JOB_STATE_CANCELLING = 6
     JOB_STATE_CANCELLED = 7
     JOB_STATE_PAUSED = 8
+    JOB_STATE_EXPIRED = 9
 
 
 __all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py
index fb926e987d..d42e58ab0c 100644
--- a/google/cloud/aiplatform_v1/types/machine_resources.py
+++ b/google/cloud/aiplatform_v1/types/machine_resources.py
@@ -27,6 +27,7 @@
         "BatchDedicatedResources",
         "ResourcesConsumed",
         "DiskSpec",
+        "AutoscalingMetricSpec",
     },
 )
 
@@ -77,15 +78,14 @@ class DedicatedResources(proto.Message):
             Required. Immutable. The specification of a
             single machine used by the prediction.
         min_replica_count (int):
-            Required. Immutable. The minimum number of machine replicas
-            this DeployedModel will be always deployed on.
-            If traffic
-            against it increases, it may dynamically be deployed onto
-            more replicas, and as traffic decreases, some of these extra
-            replicas may be freed. Note: if
-            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
-            is above 0, currently the model will be always deployed
-            precisely on
-            [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count].
+            Required. Immutable. The minimum number of
+            machine replicas this DeployedModel will be
+            always deployed on. This value must be greater
+            than or equal to 1.
+            If traffic against the DeployedModel increases,
+            it may dynamically be deployed onto more
+            replicas, and as traffic decreases, some of
+            these extra replicas may be freed.
         max_replica_count (int):
             Immutable. The maximum number of replicas this DeployedModel
             may be deployed on when the traffic against it increases. If
@@ -98,11 +98,42 @@ class DedicatedResources(proto.Message):
             will use
             [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count]
             as the default value.
+        autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1.types.AutoscalingMetricSpec]):
+            Immutable. The metric specifications that override a
+            resource utilization metric (CPU utilization, accelerator's
+            duty cycle, and so on) target value (defaulting to 60 if not
+            set). At most one entry is allowed per metric.
+
+            If
+            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
+            is above 0, the autoscaling will be based on both CPU
+            utilization and accelerator's duty cycle metrics, and will
+            scale up when either metric exceeds its target value and
+            scale down when both metrics are under their target values.
+            The default target value is 60 for both metrics.
+
+            If
+            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
+            is 0, the autoscaling will be based on the CPU utilization
+            metric only, with a default target value of 60 if not
+            explicitly set.
+
+            For example, in the case of Online Prediction, if you want
+            to override target CPU utilization to 80, you should set
+            [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1.AutoscalingMetricSpec.metric_name]
+            to
+            ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
+            and
+            [autoscaling_metric_specs.target][google.cloud.aiplatform.v1.AutoscalingMetricSpec.target]
+            to ``80``.
     """
 
     machine_spec = proto.Field(proto.MESSAGE, number=1, message="MachineSpec",)
     min_replica_count = proto.Field(proto.INT32, number=2,)
     max_replica_count = proto.Field(proto.INT32, number=3,)
+    autoscaling_metric_specs = proto.RepeatedField(
+        proto.MESSAGE, number=4, message="AutoscalingMetricSpec",
+    )
 
 
 class AutomaticResources(proto.Message):
@@ -197,4 +228,29 @@ class DiskSpec(proto.Message):
     boot_disk_size_gb = proto.Field(proto.INT32, number=2,)
 
 
+class AutoscalingMetricSpec(proto.Message):
+    r"""The metric specification that defines the target resource
+    utilization (CPU utilization, accelerator's duty cycle, and so
+    on) for calculating the desired replica count.
+
+    Attributes:
+        metric_name (str):
+            Required. The resource metric name.
+            Supported metrics:
+
+            -  For Online Prediction:
+            -  ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle``
+            -  ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
+        target (int):
+            The target resource utilization in percentage
+            (1% - 100%) for the given metric; once the real
+            usage deviates from the target by a certain
+            percentage, the machine replicas change. The
+            default value is 60 (representing 60%) if not
+            provided.
+    """
+
+    metric_name = proto.Field(proto.STRING, number=1,)
+    target = proto.Field(proto.INT32, number=2,)
+
+
 __all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/pipeline_job.py b/google/cloud/aiplatform_v1/types/pipeline_job.py
new file mode 100644
index 0000000000..d2f48fa8a7
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/pipeline_job.py
@@ -0,0 +1,311 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1.types import artifact
+from google.cloud.aiplatform_v1.types import context
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import execution as gca_execution
+from google.cloud.aiplatform_v1.types import pipeline_state
+from google.cloud.aiplatform_v1.types import value as gca_value
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.aiplatform.v1",
+    manifest={
+        "PipelineJob",
+        "PipelineJobDetail",
+        "PipelineTaskDetail",
+        "PipelineTaskExecutorDetail",
+    },
+)
+
+
+class PipelineJob(proto.Message):
+    r"""An instance of a machine learning PipelineJob.
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            PipelineJob.
+        display_name (str):
+            The display name of the Pipeline.
+            The name can be up to 128 characters long and
+            can consist of any UTF-8 characters.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Pipeline creation time.
+        start_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Pipeline start time.
+        end_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Pipeline end time.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this PipelineJob
+            was most recently updated.
+        pipeline_spec (google.protobuf.struct_pb2.Struct):
+            Required. The spec of the pipeline.
+        state (google.cloud.aiplatform_v1.types.PipelineState):
+            Output only. The detailed state of the job.
+        job_detail (google.cloud.aiplatform_v1.types.PipelineJobDetail):
+            Output only. The details of the pipeline run.
+            Not available in the list view.
+        error (google.rpc.status_pb2.Status):
+            Output only. The error that occurred during
+            pipeline execution. Only populated when the
+            pipeline's state is FAILED or CANCELLED.
+        labels (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize PipelineJobs.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed.
+            See https://goo.gl/xmQnxf for more information
+            and examples of labels.
+        runtime_config (google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig):
+            Runtime config of the pipeline.
+        encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+            Customer-managed encryption key spec for a
+            pipelineJob. If set, this PipelineJob and all of
+            its sub-resources will be secured by this key.
+        service_account (str):
+            The service account that the pipeline workload runs as. If
+            not specified, the Compute Engine default service account in
+            the project will be used. See
+            https://cloud.google.com/compute/docs/access/service-accounts#default_service_account
+
+            Users starting the pipeline must have the
+            ``iam.serviceAccounts.actAs`` permission on this service
+            account.
+        network (str):
+            The full name of the Compute Engine
+            `network `__
+            to which the Pipeline Job's workload should be peered. For
+            example, ``projects/12345/global/networks/myVPC``.
+            `Format `__
+            is of the form
+            ``projects/{project}/global/networks/{network}``. Where
+            {project} is a project number, as in ``12345``, and
+            {network} is a network name.
+
+            Private services access must already be configured for the
+            network. Pipeline job will apply the network configuration
+            to the GCP resources being launched, if applied, such as
+            Vertex AI Training or Dataflow job. If left unspecified, the
+            workload is not peered with any network.
+    """
+
+    class RuntimeConfig(proto.Message):
+        r"""The runtime config of a PipelineJob.
+        Attributes:
+            parameters (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig.ParametersEntry]):
+                The runtime parameters of the PipelineJob. The parameters
+                will be passed into
+                [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec]
+                to replace the placeholders at runtime.
+            gcs_output_directory (str):
+                Required. A path in a Cloud Storage bucket, which will be
+                treated as the root output directory of the pipeline. It is
+                used by the system to generate the paths of output
+                artifacts. The artifact paths are generated with a sub-path
+                pattern ``{job_id}/{task_id}/{output_key}`` under the
+                specified output directory. The service account specified in
+                this pipeline must have the ``storage.objects.get`` and
+                ``storage.objects.create`` permissions for this bucket.
+ """ + + parameters = proto.MapField( + proto.STRING, proto.MESSAGE, number=1, message=gca_value.Value, + ) + gcs_output_directory = proto.Field(proto.STRING, number=2,) + + name = proto.Field(proto.STRING, number=1,) + display_name = proto.Field(proto.STRING, number=2,) + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) + update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,) + pipeline_spec = proto.Field(proto.MESSAGE, number=7, message=struct_pb2.Struct,) + state = proto.Field(proto.ENUM, number=8, enum=pipeline_state.PipelineState,) + job_detail = proto.Field(proto.MESSAGE, number=9, message="PipelineJobDetail",) + error = proto.Field(proto.MESSAGE, number=10, message=status_pb2.Status,) + labels = proto.MapField(proto.STRING, proto.STRING, number=11,) + runtime_config = proto.Field(proto.MESSAGE, number=12, message=RuntimeConfig,) + encryption_spec = proto.Field( + proto.MESSAGE, number=16, message=gca_encryption_spec.EncryptionSpec, + ) + service_account = proto.Field(proto.STRING, number=17,) + network = proto.Field(proto.STRING, number=18,) + + +class PipelineJobDetail(proto.Message): + r"""The runtime detail of PipelineJob. + Attributes: + pipeline_context (google.cloud.aiplatform_v1.types.Context): + Output only. The context of the pipeline. + pipeline_run_context (google.cloud.aiplatform_v1.types.Context): + Output only. The context of the current + pipeline run. + task_details (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail]): + Output only. The runtime details of the tasks + under the pipeline. + """ + + pipeline_context = proto.Field(proto.MESSAGE, number=1, message=context.Context,) + pipeline_run_context = proto.Field( + proto.MESSAGE, number=2, message=context.Context, + ) + task_details = proto.RepeatedField( + proto.MESSAGE, number=3, message="PipelineTaskDetail", + ) + + +class PipelineTaskDetail(proto.Message): + r"""The runtime detail of a task execution. + Attributes: + task_id (int): + Output only. The system generated ID of the + task. + parent_task_id (int): + Output only. The id of the parent task if the + task is within a component scope. Empty if the + task is at the root level. + task_name (str): + Output only. The user specified name of the task that is + defined in [PipelineJob.spec][]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task create time. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task end time. + executor_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail): + Output only. The detailed execution info. + state (google.cloud.aiplatform_v1.types.PipelineTaskDetail.State): + Output only. State of the task. + execution (google.cloud.aiplatform_v1.types.Execution): + Output only. The execution metadata of the + task. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + task execution. Only populated when the task's + state is FAILED or CANCELLED. + inputs (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail.InputsEntry]): + Output only. The runtime input artifacts of + the task. + outputs (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail.OutputsEntry]): + Output only. 
The runtime output artifacts of + the task. + """ + + class State(proto.Enum): + r"""Specifies state of TaskExecution""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + RUNNING = 2 + SUCCEEDED = 3 + CANCEL_PENDING = 4 + CANCELLING = 5 + CANCELLED = 6 + FAILED = 7 + SKIPPED = 8 + NOT_TRIGGERED = 9 + + class ArtifactList(proto.Message): + r"""A list of artifact metadata. + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): + Output only. A list of artifact metadata. + """ + + artifacts = proto.RepeatedField( + proto.MESSAGE, number=1, message=artifact.Artifact, + ) + + task_id = proto.Field(proto.INT64, number=1,) + parent_task_id = proto.Field(proto.INT64, number=12,) + task_name = proto.Field(proto.STRING, number=2,) + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) + executor_detail = proto.Field( + proto.MESSAGE, number=6, message="PipelineTaskExecutorDetail", + ) + state = proto.Field(proto.ENUM, number=7, enum=State,) + execution = proto.Field(proto.MESSAGE, number=8, message=gca_execution.Execution,) + error = proto.Field(proto.MESSAGE, number=9, message=status_pb2.Status,) + inputs = proto.MapField( + proto.STRING, proto.MESSAGE, number=10, message=ArtifactList, + ) + outputs = proto.MapField( + proto.STRING, proto.MESSAGE, number=11, message=ArtifactList, + ) + + +class PipelineTaskExecutorDetail(proto.Message): + r"""The runtime detail of a pipeline executor. + Attributes: + container_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail.ContainerDetail): + Output only. The detailed info for a + container executor. + custom_job_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail.CustomJobDetail): + Output only. The detailed info for a custom + job executor. + """ + + class ContainerDetail(proto.Message): + r"""The detail of a container execution. It contains the job + names of the lifecycle of a container execution. + + Attributes: + main_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1.CustomJob] for the + main container execution. + pre_caching_check_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1.CustomJob] for the + pre-caching-check container execution. This job will be + available if the + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] + specifies the ``pre_caching_check`` hook in the lifecycle + events. + """ + + main_job = proto.Field(proto.STRING, number=1,) + pre_caching_check_job = proto.Field(proto.STRING, number=2,) + + class CustomJobDetail(proto.Message): + r"""The detailed info for a custom job executor. + Attributes: + job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1.CustomJob]. 
+ """ + + job = proto.Field(proto.STRING, number=1,) + + container_detail = proto.Field( + proto.MESSAGE, number=1, oneof="details", message=ContainerDetail, + ) + custom_job_detail = proto.Field( + proto.MESSAGE, number=2, oneof="details", message=CustomJobDetail, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/pipeline_service.py b/google/cloud/aiplatform_v1/types/pipeline_service.py index 87681a47e6..565952c931 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -15,6 +15,7 @@ # import proto # type: ignore +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline from google.protobuf import field_mask_pb2 # type: ignore @@ -28,6 +29,12 @@ "ListTrainingPipelinesResponse", "DeleteTrainingPipelineRequest", "CancelTrainingPipelineRequest", + "CreatePipelineJobRequest", + "GetPipelineJobRequest", + "ListPipelineJobsRequest", + "ListPipelineJobsResponse", + "DeletePipelineJobRequest", + "CancelPipelineJobRequest", }, ) @@ -74,21 +81,35 @@ class ListTrainingPipelinesRequest(proto.Message): TrainingPipelines from. Format: ``projects/{project}/locations/{location}`` filter (str): - The standard list filter. Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - - - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` - - - ``NOT display_name="my_pipeline"`` - - - ``state="PIPELINE_STATE_FAILED"`` + Lists the PipelineJobs that match the filter expression. The + following fields are supported: + + - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``labels``: Supports key-value equality and key presence. + + Filter expressions can be combined together using logical + operators (``AND`` & ``OR``). For example: + ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. + + The syntax to define filter expression is based on + https://google.aip.dev/160. + + Examples: + + - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` + PipelineJobs created or updated after 2020-05-18 00:00:00 + UTC. + - ``labels.env = "prod"`` PipelineJobs with label "env" set + to "prod". page_size (int): The standard list page size. page_token (str): @@ -160,4 +181,132 @@ class CancelTrainingPipelineRequest(proto.Message): name = proto.Field(proto.STRING, number=1,) +class CreatePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): + Required. The PipelineJob to create. 
+ pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not provided, an + ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + """ + + parent = proto.Field(proto.STRING, number=1,) + pipeline_job = proto.Field( + proto.MESSAGE, number=2, message=gca_pipeline_job.PipelineJob, + ) + pipeline_job_id = proto.Field(proto.STRING, number=3,) + + +class GetPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class ListPipelineJobsRequest(proto.Message): + r"""Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. Supported fields: + + - ``display_name`` supports ``=`` and ``!=``. + - ``state`` supports ``=`` and ``!=``. + + The following examples demonstrate how to filter the list of + PipelineJobs: + + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` + - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` + - ``NOT display_name="my_pipeline"`` + - ``state="PIPELINE_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token] + of the previous + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + call. + """ + + parent = proto.Field(proto.STRING, number=1,) + filter = proto.Field(proto.STRING, number=2,) + page_size = proto.Field(proto.INT32, number=3,) + page_token = proto.Field(proto.STRING, number=4,) + + +class ListPipelineJobsResponse(proto.Message): + r"""Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Attributes: + pipeline_jobs (Sequence[google.cloud.aiplatform_v1.types.PipelineJob]): + List of PipelineJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1.ListPipelineJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + pipeline_jobs = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_pipeline_job.PipelineJob, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class DeletePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource to be + deleted. 
+            Format:
+            ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}``
+    """
+
+    name = proto.Field(proto.STRING, number=1,)
+
+
+class CancelPipelineJobRequest(proto.Message):
+    r"""Request message for
+    [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob].
+
+    Attributes:
+        name (str):
+            Required. The name of the PipelineJob to cancel. Format:
+            ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}``
+    """
+
+    name = proto.Field(proto.STRING, number=1,)
+
+
 __all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/study.py b/google/cloud/aiplatform_v1/types/study.py
index 40a6c84ce5..3f196b3963 100644
--- a/google/cloud/aiplatform_v1/types/study.py
+++ b/google/cloud/aiplatform_v1/types/study.py
@@ -15,6 +15,7 @@
 #
 import proto  # type: ignore
 
+from google.protobuf import duration_pb2  # type: ignore
 from google.protobuf import struct_pb2  # type: ignore
 from google.protobuf import timestamp_pb2  # type: ignore
 
@@ -31,6 +32,9 @@ class Trial(proto.Message):
     objective metrics got by running the Trial.
 
     Attributes:
+        name (str):
+            Output only. Resource name of the Trial
+            assigned by the service.
         id (str):
             Output only. The identifier of the Trial
             assigned by the service.
@@ -41,11 +45,30 @@ class Trial(proto.Message):
         final_measurement (google.cloud.aiplatform_v1.types.Measurement):
             Output only. The final measurement containing
             the objective value.
+        measurements (Sequence[google.cloud.aiplatform_v1.types.Measurement]):
+            Output only. A list of measurements that are strictly
+            lexicographically ordered by their induced tuples (steps,
+            elapsed_duration). These are used for early stopping
+            computations.
         start_time (google.protobuf.timestamp_pb2.Timestamp):
             Output only. Time when the Trial was started.
         end_time (google.protobuf.timestamp_pb2.Timestamp):
             Output only. Time when the Trial's status changed to
             ``SUCCEEDED`` or ``INFEASIBLE``.
+        client_id (str):
+            Output only. The identifier of the client that originally
+            requested this Trial. Each client is identified by a unique
+            client_id. When a client asks for a suggestion, Vizier will
+            assign it a Trial. The client should evaluate the Trial,
+            complete it, and report back to Vizier. If a suggestion is
+            requested again by the same client_id before the Trial is
+            completed, the same Trial will be returned. Multiple clients
+            with different client_ids can ask for suggestions
+            simultaneously, and each of them will get its own Trial.
+        infeasible_reason (str):
+            Output only. A human-readable string describing why the
+            Trial is infeasible. This is set only if Trial state is
+            ``INFEASIBLE``.
         custom_job (str):
             Output only. The CustomJob name linked to the Trial.
It's set for a HyperparameterTuningJob's
@@ -79,12 +102,16 @@ class Parameter(proto.Message):
 parameter_id = proto.Field(proto.STRING, number=1,)
 value = proto.Field(proto.MESSAGE, number=2, message=struct_pb2.Value,)
+ name = proto.Field(proto.STRING, number=1,)
 id = proto.Field(proto.STRING, number=2,)
 state = proto.Field(proto.ENUM, number=3, enum=State,)
 parameters = proto.RepeatedField(proto.MESSAGE, number=4, message=Parameter,)
 final_measurement = proto.Field(proto.MESSAGE, number=5, message="Measurement",)
+ measurements = proto.RepeatedField(proto.MESSAGE, number=6, message="Measurement",)
 start_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
 end_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
+ client_id = proto.Field(proto.STRING, number=9,)
+ infeasible_reason = proto.Field(proto.STRING, number=10,)
 custom_job = proto.Field(proto.STRING, number=11,)
@@ -203,10 +230,18 @@ class DoubleValueSpec(proto.Message):
 max_value (float):
 Required. Inclusive maximum value of the
 parameter.
+ default_value (float):
+ A default value for a ``DOUBLE`` parameter that is assumed
+ to be a relatively good starting point. Unset value signals
+ that there is no offered starting point.
+
+ Currently only supported by the Vizier service. Not
+ supported by HyperparameterTuningJob or TrainingPipeline.
 """
 min_value = proto.Field(proto.DOUBLE, number=1,)
 max_value = proto.Field(proto.DOUBLE, number=2,)
+ default_value = proto.Field(proto.DOUBLE, number=4, optional=True,)
 class IntegerValueSpec(proto.Message):
 r"""Value specification for a parameter in ``INTEGER`` type.
@@ -217,19 +252,35 @@ class IntegerValueSpec(proto.Message):
 max_value (int):
 Required. Inclusive maximum value of the
 parameter.
+ default_value (int):
+ A default value for an ``INTEGER`` parameter that is assumed
+ to be a relatively good starting point. Unset value signals
+ that there is no offered starting point.
+
+ Currently only supported by the Vizier service. Not
+ supported by HyperparameterTuningJob or TrainingPipeline.
 """
 min_value = proto.Field(proto.INT64, number=1,)
 max_value = proto.Field(proto.INT64, number=2,)
+ default_value = proto.Field(proto.INT64, number=4, optional=True,)
 class CategoricalValueSpec(proto.Message):
 r"""Value specification for a parameter in ``CATEGORICAL`` type.
 Attributes:
 values (Sequence[str]):
 Required. The list of possible categories.
+ default_value (str):
+ A default value for a ``CATEGORICAL`` parameter that is
+ assumed to be a relatively good starting point. Unset value
+ signals that there is no offered starting point.
+
+ Currently only supported by the Vizier service. Not
+ supported by HyperparameterTuningJob or TrainingPipeline.
 """
 values = proto.RepeatedField(proto.STRING, number=1,)
+ default_value = proto.Field(proto.STRING, number=3, optional=True,)
 class DiscreteValueSpec(proto.Message):
 r"""Value specification for a parameter in ``DISCRETE`` type.
@@ -241,9 +292,18 @@ class DiscreteValueSpec(proto.Message):
 might have possible settings of 1.5, 2.5, and 4.0. This list
 should not contain more than 1,000 values.
+ default_value (float):
+ A default value for a ``DISCRETE`` parameter that is assumed
+ to be a relatively good starting point. Unset value signals
+ that there is no offered starting point. It automatically
+ rounds to the nearest feasible discrete point.
+
+ Currently only supported by the Vizier service. Not
+ supported by HyperparameterTuningJob or TrainingPipeline.
""" values = proto.RepeatedField(proto.DOUBLE, number=1,) + default_value = proto.Field(proto.DOUBLE, number=3, optional=True,) class ConditionalParameterSpec(proto.Message): r"""Represents a parameter spec with condition from its parent @@ -376,6 +436,9 @@ class Measurement(proto.Message): suggested hyperparameter values. Attributes: + elapsed_duration (google.protobuf.duration_pb2.Duration): + Output only. Time that the Trial has been + running at the point of this Measurement. step_count (int): Output only. The number of steps the machine learning model has been trained for. Must be @@ -400,6 +463,9 @@ class Metric(proto.Message): metric_id = proto.Field(proto.STRING, number=1,) value = proto.Field(proto.DOUBLE, number=2,) + elapsed_duration = proto.Field( + proto.MESSAGE, number=1, message=duration_pb2.Duration, + ) step_count = proto.Field(proto.INT64, number=2,) metrics = proto.RepeatedField(proto.MESSAGE, number=3, message=Metric,) diff --git a/google/cloud/aiplatform_v1/types/value.py b/google/cloud/aiplatform_v1/types/value.py new file mode 100644 index 0000000000..acc5f50517 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/value.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module(package="google.cloud.aiplatform.v1", manifest={"Value",},) + + +class Value(proto.Message): + r"""Value is the value of the field. + Attributes: + int_value (int): + An integer value. + double_value (float): + A double value. + string_value (str): + A string value. + """ + + int_value = proto.Field(proto.INT64, number=1, oneof="value",) + double_value = proto.Field(proto.DOUBLE, number=2, oneof="value",) + string_value = proto.Field(proto.STRING, number=3, oneof="value",) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index 05b3e83e9d..9cf14fcb76 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -6227,11 +6227,33 @@ def test_parse_model_path(): assert expected == actual -def test_trial_path(): +def test_network_path(): project = "squid" - location = "clam" - study = "whelk" - trial = "octopus" + network = "clam" + expected = "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + actual = JobServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "whelk", + "network": "octopus", + } + path = JobServiceClient.network_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_network_path(path) + assert expected == actual + + +def test_trial_path(): + project = "oyster" + location = "nudibranch" + study = "cuttlefish" + trial = "mussel" expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format( project=project, location=location, study=study, trial=trial, ) @@ -6241,10 +6263,10 @@ def test_trial_path(): def test_parse_trial_path(): expected = { - "project": "oyster", - "location": "nudibranch", - "study": "cuttlefish", - "trial": "mussel", + "project": "winkle", + "location": "nautilus", + "study": "scallop", + "trial": "abalone", } path = JobServiceClient.trial_path(**expected) @@ -6254,7 +6276,7 @@ def test_parse_trial_path(): def test_common_billing_account_path(): - billing_account = "winkle" + billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -6264,7 +6286,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "nautilus", + "billing_account": "clam", } path = JobServiceClient.common_billing_account_path(**expected) @@ -6274,7 +6296,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "scallop" + folder = "whelk" expected = "folders/{folder}".format(folder=folder,) actual = JobServiceClient.common_folder_path(folder) assert expected == actual @@ -6282,7 +6304,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "abalone", + "folder": "octopus", } path = JobServiceClient.common_folder_path(**expected) @@ -6292,7 +6314,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "squid" + organization = "oyster" expected = "organizations/{organization}".format(organization=organization,) actual = JobServiceClient.common_organization_path(organization) assert expected == actual @@ -6300,7 +6322,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "clam", + "organization": "nudibranch", } path = JobServiceClient.common_organization_path(**expected) @@ -6310,7 +6332,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "whelk" + project = "cuttlefish" expected = "projects/{project}".format(project=project,) actual = JobServiceClient.common_project_path(project) assert expected == actual @@ -6318,7 +6340,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "octopus", + "project": "mussel", } path = JobServiceClient.common_project_path(**expected) @@ -6328,8 +6350,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "oyster" - location = "nudibranch" + project = "winkle" + location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -6339,8 +6361,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "cuttlefish", - "location": "mussel", + "project": "scallop", + "location": "abalone", } path = JobServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py index 1178cefbb6..4b6a976023 100644 --- a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py @@ -43,16 +43,22 @@ from 
google.cloud.aiplatform_v1.services.pipeline_service.transports.base import ( _GOOGLE_AUTH_VERSION, ) +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import context from google.cloud.aiplatform_v1.types import deployed_model_ref from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import env_var +from google.cloud.aiplatform_v1.types import execution from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import pipeline_state from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.cloud.aiplatform_v1.types import value from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore @@ -1820,6 +1826,1286 @@ async def test_cancel_training_pipeline_flattened_error_async(): ) +def test_create_pipeline_job( + transport: str = "grpc", request_type=pipeline_service.CreatePipelineJobRequest +): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + response = client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_pipeline_job.PipelineJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == "service_account_value" + assert response.network == "network_value" + + +def test_create_pipeline_job_from_dict(): + test_create_pipeline_job(request_type=dict) + + +def test_create_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
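+ # Patching the transport method keeps the test hermetic: the anonymous
+ # credentials are never exercised and no RPC leaves the process.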
+ with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + client.create_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + +@pytest.mark.asyncio +async def test_create_pipeline_job_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.CreatePipelineJobRequest, +): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + ) + response = await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_pipeline_job.PipelineJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == "service_account_value" + assert response.network == "network_value" + + +@pytest.mark.asyncio +async def test_create_pipeline_job_async_from_dict(): + await test_create_pipeline_job_async(request_type=dict) + + +def test_create_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + call.return_value = gca_pipeline_job.PipelineJob() + client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_pipeline_job.PipelineJob() + ) + await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_pipeline_job( + parent="parent_value", + pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"), + pipeline_job_id="pipeline_job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name="name_value") + assert args[0].pipeline_job_id == "pipeline_job_id_value" + + +def test_create_pipeline_job_flattened_error(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_pipeline_job( + pipeline_service.CreatePipelineJobRequest(), + parent="parent_value", + pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"), + pipeline_job_id="pipeline_job_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_pipeline_job.PipelineJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_pipeline_job( + parent="parent_value", + pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"), + pipeline_job_id="pipeline_job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name="name_value") + assert args[0].pipeline_job_id == "pipeline_job_id_value" + + +@pytest.mark.asyncio +async def test_create_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
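+ # (The ValueError is raised client-side, before any request is built
+ # or sent.)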
+ with pytest.raises(ValueError): + await client.create_pipeline_job( + pipeline_service.CreatePipelineJobRequest(), + parent="parent_value", + pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"), + pipeline_job_id="pipeline_job_id_value", + ) + + +def test_get_pipeline_job( + transport: str = "grpc", request_type=pipeline_service.GetPipelineJobRequest +): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + response = client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pipeline_job.PipelineJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == "service_account_value" + assert response.network == "network_value" + + +def test_get_pipeline_job_from_dict(): + test_get_pipeline_job(request_type=dict) + + +def test_get_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + client.get_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + + +@pytest.mark.asyncio +async def test_get_pipeline_job_async( + transport: str = "grpc_asyncio", request_type=pipeline_service.GetPipelineJobRequest +): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + ) + response = await client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pipeline_job.PipelineJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == "service_account_value" + assert response.network == "network_value" + + +@pytest.mark.asyncio +async def test_get_pipeline_job_async_from_dict(): + await test_get_pipeline_job_async(request_type=dict) + + +def test_get_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetPipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + call.return_value = pipeline_job.PipelineJob() + client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetPipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_job.PipelineJob() + ) + await client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_get_pipeline_job_flattened_error(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_pipeline_job( + pipeline_service.GetPipelineJobRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_job.PipelineJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_pipeline_job( + pipeline_service.GetPipelineJobRequest(), name="name_value", + ) + + +def test_list_pipeline_jobs( + transport: str = "grpc", request_type=pipeline_service.ListPipelineJobsRequest +): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPipelineJobsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_pipeline_jobs_from_dict(): + test_list_pipeline_jobs(request_type=dict) + + +def test_list_pipeline_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
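+ # With no request object and no flattened fields, the client should
+ # still send a well-formed, default ListPipelineJobsRequest.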
+ with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + client.list_pipeline_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.ListPipelineJobsRequest, +): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_service.ListPipelineJobsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPipelineJobsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_from_dict(): + await test_list_pipeline_jobs_async(request_type=dict) + + +def test_list_pipeline_jobs_field_headers(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + call.return_value = pipeline_service.ListPipelineJobsResponse() + client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_service.ListPipelineJobsResponse() + ) + await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
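+ # The x-goog-request-params metadata carries the resource name so the
+ # backend can route the request.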
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_pipeline_jobs_flattened():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_service.ListPipelineJobsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_pipeline_jobs(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == "parent_value"
+
+
+def test_list_pipeline_jobs_flattened_error():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_pipeline_jobs(
+ pipeline_service.ListPipelineJobsRequest(), parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_service.ListPipelineJobsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_service.ListPipelineJobsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_pipeline_jobs(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_pipeline_jobs(
+ pipeline_service.ListPipelineJobsRequest(), parent="parent_value",
+ )
+
+
+def test_list_pipeline_jobs_pager():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
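+ # The fixture below simulates four pages: 3 jobs (token "abc"),
+ # 0 jobs (token "def"), 1 job (token "ghi"), and 2 jobs with no
+ # token, which ends iteration; the trailing RuntimeError only fires
+ # if the pager over-fetches.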
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[], next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_pipeline_jobs(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, pipeline_job.PipelineJob) for i in results)
+
+
+def test_list_pipeline_jobs_pages():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[], next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_pipeline_jobs(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_pager():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[], next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_pipeline_jobs(request={},)
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, pipeline_job.PipelineJob) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_pages():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
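+ # new_callable=mock.AsyncMock makes the patched stub awaitable, as
+ # the grpc.aio transport expects.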
+ with mock.patch.object( + type(client.transport.list_pipeline_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token="abc", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], next_page_token="def", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_pipeline_jobs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_pipeline_job( + transport: str = "grpc", request_type=pipeline_service.DeletePipelineJobRequest +): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_pipeline_job_from_dict(): + test_delete_pipeline_job(request_type=dict) + + +def test_delete_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + client.delete_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.DeletePipelineJobRequest, +): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
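+ # DeletePipelineJob is long-running, so the stub returns a
+ # google.longrunning Operation wrapped in an awaitable fake call.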
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async_from_dict(): + await test_delete_pipeline_job_async(request_type=dict) + + +def test_delete_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_delete_pipeline_job_flattened_error(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_pipeline_job( + pipeline_service.DeletePipelineJobRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_pipeline_job( + pipeline_service.DeletePipelineJobRequest(), name="name_value", + ) + + +def test_cancel_pipeline_job( + transport: str = "grpc", request_type=pipeline_service.CancelPipelineJobRequest +): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_pipeline_job_from_dict(): + test_cancel_pipeline_job(request_type=dict) + + +def test_cancel_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
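+ # CancelPipelineJob returns google.protobuf.Empty, which the client
+ # surfaces as None.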
+ with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + client.cancel_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.CancelPipelineJobRequest, +): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async_from_dict(): + await test_cancel_pipeline_job_async(request_type=dict) + + +def test_cancel_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + call.return_value = None + client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_cancel_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
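+ # The flattened name= keyword is marshalled into a
+ # CancelPipelineJobRequest before the transport is invoked.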
+ with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_cancel_pipeline_job_flattened_error(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_pipeline_job( + pipeline_service.CancelPipelineJobRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_pipeline_job( + pipeline_service.CancelPipelineJobRequest(), name="name_value", + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. 
transport = transports.PipelineServiceGrpcTransport( @@ -1922,6 +3208,11 @@ def test_pipeline_service_base_transport(): "list_training_pipelines", "delete_training_pipeline", "cancel_training_pipeline", + "create_pipeline_job", + "get_pipeline_job", + "list_pipeline_jobs", + "delete_pipeline_job", + "cancel_pipeline_job", ) for method in methods: with pytest.raises(NotImplementedError): @@ -2301,10 +3592,96 @@ def test_pipeline_service_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_endpoint_path(): +def test_artifact_path(): project = "squid" location = "clam" - endpoint = "whelk" + metadata_store = "whelk" + artifact = "octopus" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( + project=project, + location=location, + metadata_store=metadata_store, + artifact=artifact, + ) + actual = PipelineServiceClient.artifact_path( + project, location, metadata_store, artifact + ) + assert expected == actual + + +def test_parse_artifact_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "artifact": "mussel", + } + path = PipelineServiceClient.artifact_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_artifact_path(path) + assert expected == actual + + +def test_context_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + context = "abalone" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + actual = PipelineServiceClient.context_path( + project, location, metadata_store, context + ) + assert expected == actual + + +def test_parse_context_path(): + expected = { + "project": "squid", + "location": "clam", + "metadata_store": "whelk", + "context": "octopus", + } + path = PipelineServiceClient.context_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_context_path(path) + assert expected == actual + + +def test_custom_job_path(): + project = "oyster" + location = "nudibranch" + custom_job = "cuttlefish" + expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format( + project=project, location=location, custom_job=custom_job, + ) + actual = PipelineServiceClient.custom_job_path(project, location, custom_job) + assert expected == actual + + +def test_parse_custom_job_path(): + expected = { + "project": "mussel", + "location": "winkle", + "custom_job": "nautilus", + } + path = PipelineServiceClient.custom_job_path(**expected) + + # Check that the path construction is reversible. 
+    actual = PipelineServiceClient.parse_custom_job_path(path)
+    assert expected == actual
+
+
+def test_endpoint_path():
+    project = "scallop"
+    location = "abalone"
+    endpoint = "squid"
     expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
         project=project, location=location, endpoint=endpoint,
     )
@@ -2314,9 +3691,9 @@ def test_endpoint_path():

 def test_parse_endpoint_path():
     expected = {
-        "project": "octopus",
-        "location": "oyster",
-        "endpoint": "nudibranch",
+        "project": "clam",
+        "location": "whelk",
+        "endpoint": "octopus",
     }
     path = PipelineServiceClient.endpoint_path(**expected)

@@ -2325,10 +3702,41 @@ def test_parse_endpoint_path():
     assert expected == actual


+def test_execution_path():
+    project = "oyster"
+    location = "nudibranch"
+    metadata_store = "cuttlefish"
+    execution = "mussel"
+    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(
+        project=project,
+        location=location,
+        metadata_store=metadata_store,
+        execution=execution,
+    )
+    actual = PipelineServiceClient.execution_path(
+        project, location, metadata_store, execution
+    )
+    assert expected == actual
+
+
+def test_parse_execution_path():
+    expected = {
+        "project": "winkle",
+        "location": "nautilus",
+        "metadata_store": "scallop",
+        "execution": "abalone",
+    }
+    path = PipelineServiceClient.execution_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = PipelineServiceClient.parse_execution_path(path)
+    assert expected == actual
+
+
 def test_model_path():
-    project = "cuttlefish"
-    location = "mussel"
-    model = "winkle"
+    project = "squid"
+    location = "clam"
+    model = "whelk"
     expected = "projects/{project}/locations/{location}/models/{model}".format(
         project=project, location=location, model=model,
     )
@@ -2338,9 +3746,9 @@ def test_model_path():

 def test_parse_model_path():
     expected = {
-        "project": "nautilus",
-        "location": "scallop",
-        "model": "abalone",
+        "project": "octopus",
+        "location": "oyster",
+        "model": "nudibranch",
     }
     path = PipelineServiceClient.model_path(**expected)

@@ -2349,10 +3757,56 @@ def test_parse_model_path():
     assert expected == actual


+def test_network_path():
+    project = "cuttlefish"
+    network = "mussel"
+    expected = "projects/{project}/global/networks/{network}".format(
+        project=project, network=network,
+    )
+    actual = PipelineServiceClient.network_path(project, network)
+    assert expected == actual
+
+
+def test_parse_network_path():
+    expected = {
+        "project": "winkle",
+        "network": "nautilus",
+    }
+    path = PipelineServiceClient.network_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = PipelineServiceClient.parse_network_path(path)
+    assert expected == actual
+
+
+def test_pipeline_job_path():
+    project = "scallop"
+    location = "abalone"
+    pipeline_job = "squid"
+    expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(
+        project=project, location=location, pipeline_job=pipeline_job,
+    )
+    actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job)
+    assert expected == actual
+
+
+def test_parse_pipeline_job_path():
+    expected = {
+        "project": "clam",
+        "location": "whelk",
+        "pipeline_job": "octopus",
+    }
+    path = PipelineServiceClient.pipeline_job_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = PipelineServiceClient.parse_pipeline_job_path(path)
+    assert expected == actual
+
+
 def test_training_pipeline_path():
-    project = "squid"
-    location = "clam"
-    training_pipeline = "whelk"
+    project = "oyster"
+    location = "nudibranch"
+    training_pipeline = "cuttlefish"
     expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(
         project=project, location=location, training_pipeline=training_pipeline,
     )
@@ -2364,9 +3818,9 @@ def test_training_pipeline_path():

 def test_parse_training_pipeline_path():
     expected = {
-        "project": "octopus",
-        "location": "oyster",
-        "training_pipeline": "nudibranch",
+        "project": "mussel",
+        "location": "winkle",
+        "training_pipeline": "nautilus",
     }
     path = PipelineServiceClient.training_pipeline_path(**expected)

@@ -2376,7 +3830,7 @@ def test_parse_training_pipeline_path():

 def test_common_billing_account_path():
-    billing_account = "cuttlefish"
+    billing_account = "scallop"
     expected = "billingAccounts/{billing_account}".format(
         billing_account=billing_account,
     )
@@ -2386,7 +3840,7 @@ def test_common_billing_account_path():

 def test_parse_common_billing_account_path():
     expected = {
-        "billing_account": "mussel",
+        "billing_account": "abalone",
     }
     path = PipelineServiceClient.common_billing_account_path(**expected)

@@ -2396,7 +3850,7 @@ def test_parse_common_billing_account_path():

 def test_common_folder_path():
-    folder = "winkle"
+    folder = "squid"
     expected = "folders/{folder}".format(folder=folder,)
     actual = PipelineServiceClient.common_folder_path(folder)
     assert expected == actual
@@ -2404,7 +3858,7 @@ def test_common_folder_path():

 def test_parse_common_folder_path():
     expected = {
-        "folder": "nautilus",
+        "folder": "clam",
     }
     path = PipelineServiceClient.common_folder_path(**expected)

@@ -2414,7 +3868,7 @@ def test_parse_common_folder_path():

 def test_common_organization_path():
-    organization = "scallop"
+    organization = "whelk"
     expected = "organizations/{organization}".format(organization=organization,)
     actual = PipelineServiceClient.common_organization_path(organization)
     assert expected == actual
@@ -2422,7 +3876,7 @@ def test_common_organization_path():

 def test_parse_common_organization_path():
     expected = {
-        "organization": "abalone",
+        "organization": "octopus",
     }
     path = PipelineServiceClient.common_organization_path(**expected)

@@ -2432,7 +3886,7 @@ def test_parse_common_organization_path():

 def test_common_project_path():
-    project = "squid"
+    project = "oyster"
     expected = "projects/{project}".format(project=project,)
     actual = PipelineServiceClient.common_project_path(project)
     assert expected == actual
@@ -2440,7 +3894,7 @@ def test_common_project_path():

 def test_parse_common_project_path():
     expected = {
-        "project": "clam",
+        "project": "nudibranch",
     }
     path = PipelineServiceClient.common_project_path(**expected)

@@ -2450,8 +3904,8 @@ def test_parse_common_project_path():

 def test_common_location_path():
-    project = "whelk"
-    location = "octopus"
+    project = "cuttlefish"
+    location = "mussel"
     expected = "projects/{project}/locations/{location}".format(
         project=project, location=location,
     )
@@ -2461,8 +3915,8 @@ def test_common_location_path():

 def test_parse_common_location_path():
     expected = {
-        "project": "oyster",
-        "location": "nudibranch",
+        "project": "winkle",
+        "location": "nautilus",
     }
     path = PipelineServiceClient.common_location_path(**expected)
diff --git a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py
index 188abef4d6..2652b0ddee 100644
--- a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py
@@ -651,8 +651,8 @@ def test_predict_flattened_error():
         client.predict(
             prediction_service.PredictRequest(),
             endpoint="endpoint_value",
-            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
             parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
         )

@@ -668,8 +668,8 @@ async def test_predict_flattened_error_async():
         await client.predict(
             prediction_service.PredictRequest(),
             endpoint="endpoint_value",
-            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
             parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
         )
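
Note on the flattened-call tests above: every new PipelineService method accepts either a fully built request object or the flattened keyword arguments, but not both; mixing the two raises ValueError on the client side, before any RPC is attempted. A minimal sketch of that contract (illustrative only, not part of the patch; the resource name is a placeholder):

    import pytest
    from google.auth import credentials as ga_credentials
    from google.cloud.aiplatform_v1 import PipelineServiceClient
    from google.cloud.aiplatform_v1.types import pipeline_service

    # Anonymous credentials suffice here: the ValueError is raised locally.
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials())

    job_name = "projects/my-project/locations/us-central1/pipelineJobs/my-job"

    # Either form alone is valid; combining them is rejected.
    with pytest.raises(ValueError):
        client.cancel_pipeline_job(
            pipeline_service.CancelPipelineJobRequest(name=job_name), name=job_name
        )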
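
Note on the path-helper tests: each *_path classmethod and its parse_*_path counterpart are inverses, which is exactly what the round-trip assertions check. A sketch using the new pipeline-job helper (illustrative only; the component values are placeholders):

    from google.cloud.aiplatform_v1 import PipelineServiceClient

    # Build the canonical resource name from its components...
    path = PipelineServiceClient.pipeline_job_path(
        project="my-project", location="us-central1", pipeline_job="my-job"
    )
    assert path == "projects/my-project/locations/us-central1/pipelineJobs/my-job"

    # ...and recover the components by parsing the name back into a dict.
    assert PipelineServiceClient.parse_pipeline_job_path(path) == {
        "project": "my-project",
        "location": "us-central1",
        "pipeline_job": "my-job",
    }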