From e346117d5453358a32a1d6e584613ace5c2251d9 Mon Sep 17 00:00:00 2001 From: sasha-gitg <44654632+sasha-gitg@users.noreply.github.com> Date: Thu, 29 Jul 2021 13:12:04 -0400 Subject: [PATCH] feat: add wait for creation and more informative exception when properties are not available (#566) --- google/cloud/aiplatform/base.py | 69 ++++++ google/cloud/aiplatform/datasets/dataset.py | 1 + .../aiplatform/datasets/tabular_dataset.py | 2 + google/cloud/aiplatform/jobs.py | 31 ++- google/cloud/aiplatform/models.py | 12 +- google/cloud/aiplatform/pipeline_jobs.py | 37 ++-- google/cloud/aiplatform/training_jobs.py | 11 +- .../test_automl_forecasting_training_jobs.py | 2 +- .../test_automl_image_training_jobs.py | 2 +- .../test_automl_tabular_training_jobs.py | 197 +++++++++++++++++- .../test_automl_text_training_jobs.py | 2 +- .../test_automl_video_training_jobs.py | 2 +- tests/unit/aiplatform/test_custom_job.py | 66 +++++- tests/unit/aiplatform/test_datasets.py | 101 ++++++++- tests/unit/aiplatform/test_endpoints.py | 30 ++- .../test_hyperparameter_tuning_job.py | 84 +++++++- tests/unit/aiplatform/test_jobs.py | 54 +++++ tests/unit/aiplatform/test_models.py | 43 ++++ tests/unit/aiplatform/test_pipeline_jobs.py | 2 +- tests/unit/aiplatform/test_training_jobs.py | 2 +- 20 files changed, 685 insertions(+), 65 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index a6377ece86..1a3eed8add 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -23,6 +23,7 @@ import logging import sys import threading +import time from typing import ( Any, Callable, @@ -540,21 +541,25 @@ def _sync_gca_resource(self): @property def name(self) -> str: """Name of this resource.""" + self._assert_gca_resource_is_available() return self._gca_resource.name.split("/")[-1] @property def resource_name(self) -> str: """Full qualified resource name.""" + self._assert_gca_resource_is_available() return self._gca_resource.name @property def display_name(self) -> str: """Display name of this resource.""" + self._assert_gca_resource_is_available() return self._gca_resource.display_name @property def create_time(self) -> datetime.datetime: """Time this resource was created.""" + self._assert_gca_resource_is_available() return self._gca_resource.create_time @property @@ -570,6 +575,7 @@ def encryption_spec(self) -> Optional[gca_encryption_spec.EncryptionSpec]: If this is set, then all resources created by this Vertex AI resource will be encrypted with the provided encryption key. """ + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "encryption_spec") @property @@ -578,13 +584,26 @@ def labels(self) -> Dict[str, str]: Read more about labels at https://goo.gl/xmQnxf """ + self._assert_gca_resource_is_available() return self._gca_resource.labels @property def gca_resource(self) -> proto.Message: """The underlying resource proto representation.""" + self._assert_gca_resource_is_available() return self._gca_resource + def _assert_gca_resource_is_available(self) -> None: + """Helper method to raise when property is not accessible. + + Raises: + RuntimeError if _gca_resource is has not been created. 
+ """ + if self._gca_resource is None: + raise RuntimeError( + f"{self.__class__.__name__} resource has not been created" + ) + def __repr__(self) -> str: return f"{object.__repr__(self)} \nresource name: {self.resource_name}" @@ -1061,6 +1080,56 @@ def __repr__(self) -> str: return FutureManager.__repr__(self) + def _wait_for_resource_creation(self) -> None: + """Wait until underlying resource is created. + + Currently this should only be used on subclasses that implement the construct then + `run` pattern because the underlying sync=False implementation will not update + downstream resource noun object's _gca_resource until the entire invoked method is complete. + + Ex: + job = CustomTrainingJob() + job.run(sync=False, ...) + job._wait_for_resource_creation() + Raises: + RuntimeError if the resource has not been scheduled to be created. + """ + + # If the user calls this but didn't actually invoke an API to create + if self._are_futures_done() and not getattr(self._gca_resource, "name", None): + self._raise_future_exception() + raise RuntimeError( + f"{self.__class__.__name__} resource is not scheduled to be created." + ) + + while not getattr(self._gca_resource, "name", None): + # breaks out of loop if creation has failed async + if self._are_futures_done() and not getattr( + self._gca_resource, "name", None + ): + self._raise_future_exception() + + time.sleep(1) + + def _assert_gca_resource_is_available(self) -> None: + """Helper method to raise when accessing properties that do not exist. + + Overrides VertexAiResourceNoun to provide a more informative exception if + resource creation has failed asynchronously. + + Raises: + RuntimeError when resource has not been created. + """ + if not getattr(self._gca_resource, "name", None): + raise RuntimeError( + f"{self.__class__.__name__} resource has not been created." + + ( + f" Resource failed with: {self._exception}" + if self._exception + else "" + ) + ) + def get_annotation_class(annotation: type) -> type: """Helper method to retrieve type annotation. diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py index 72627d120f..c41b252869 100644 --- a/google/cloud/aiplatform/datasets/dataset.py +++ b/google/cloud/aiplatform/datasets/dataset.py @@ -84,6 +84,7 @@ def __init__( @property def metadata_schema_uri(self) -> str: """The metadata schema uri of this dataset resource.""" + self._assert_gca_resource_is_available() return self._gca_resource.metadata_schema_uri def _validate_metadata_schema_uri(self) -> None: diff --git a/google/cloud/aiplatform/datasets/tabular_dataset.py b/google/cloud/aiplatform/datasets/tabular_dataset.py index 0e812892e4..1fe23f5ee2 100644 --- a/google/cloud/aiplatform/datasets/tabular_dataset.py +++ b/google/cloud/aiplatform/datasets/tabular_dataset.py @@ -52,6 +52,8 @@ def column_names(self) -> List[str]: RuntimeError: When no valid source is found. """ + self._assert_gca_resource_is_available() + metadata = self._gca_resource.metadata if metadata is None: diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index cd0ae48e66..6cc549027b 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -330,6 +330,7 @@ def output_info(self,) -> Optional[aiplatform.gapic.BatchPredictionJob.OutputInf This is only available for batch predicition jobs that have run successfully. 
""" + self._assert_gca_resource_is_available() return self._gca_resource.output_info @property @@ -337,11 +338,13 @@ def partial_failures(self) -> Optional[Sequence[status_pb2.Status]]: """Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. Status details fields contain standard GCP error details.""" + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "partial_failures") @property def completion_stats(self) -> Optional[gca_completion_stats.CompletionStats]: """Statistics on completed and failed prediction instances.""" + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "completion_stats") @classmethod @@ -772,6 +775,8 @@ def iter_outputs( GCS or BQ output provided. """ + self._assert_gca_resource_is_available() + if self.state != gca_job_state.JobState.JOB_STATE_SUCCEEDED: raise RuntimeError( f"Cannot read outputs until BatchPredictionJob has succeeded, " @@ -859,23 +864,6 @@ def __init__( def run(self) -> None: pass - @property - def _has_run(self) -> bool: - """Property returns true if this class has a resource name.""" - return bool(self._gca_resource.name) - - @property - def state(self) -> gca_job_state.JobState: - """Current state of job. - - Raises: - RuntimeError if job run has not been called. - """ - if not self._has_run: - raise RuntimeError("Job has not run. No state available.") - - return super().state - @classmethod def get( cls, @@ -913,6 +901,10 @@ def get( return self + def wait_for_resource_creation(self) -> None: + """Waits until resource has been created.""" + self._wait_for_resource_creation() + class DataLabelingJob(_Job): _resource_noun = "dataLabelingJobs" @@ -1041,7 +1033,8 @@ def network(self) -> Optional[str]: Private services access must already be configured for the network. If left unspecified, the CustomJob is not peered with any network. """ - return getattr(self._gca_resource, "network") + self._assert_gca_resource_is_available() + return self._gca_resource.job_spec.network @classmethod def from_local_script( @@ -1512,6 +1505,7 @@ def network(self) -> Optional[str]: Private services access must already be configured for the network. If left unspecified, the HyperparameterTuningJob is not peered with any network. """ + self._assert_gca_resource_is_available() return getattr(self._gca_resource.trial_job_spec, "network") @base.optional_sync() @@ -1612,4 +1606,5 @@ def run( @property def trials(self) -> List[gca_study_compat.Trial]: + self._assert_gca_resource_is_available() return list(self._gca_resource.trials) diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index d0dac8c89a..9c53ff5a2d 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -146,7 +146,8 @@ def network(self) -> Optional[str]: Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. """ - return getattr(self._gca_resource, "network") + self._assert_gca_resource_is_available() + return getattr(self._gca_resource, "network", None) @classmethod def create( @@ -1283,11 +1284,13 @@ class Model(base.VertexAiResourceNounWithFutureManager): def uri(self) -> Optional[str]: """Path to the directory containing the Model artifact and any of its supporting files. 
Not present for AutoML Models.""" + self._assert_gca_resource_is_available() return self._gca_resource.artifact_uri or None @property def description(self) -> str: """Description of the model.""" + self._assert_gca_resource_is_available() return self._gca_resource.description @property @@ -1302,6 +1305,7 @@ def supported_export_formats( {'tf-saved-model': []} """ + self._assert_gca_resource_is_available() return { export_format.id: [ gca_model_compat.Model.ExportFormat.ExportableContent(content) @@ -1328,6 +1332,7 @@ def supported_deployment_resources_types( predictions by using a `BatchPredictionJob`, if it has at least one entry each in `Model.supported_input_storage_formats` and `Model.supported_output_storage_formats`.""" + self._assert_gca_resource_is_available() return list(self._gca_resource.supported_deployment_resources_types) @property @@ -1343,6 +1348,7 @@ def supported_input_storage_formats(self) -> List[str]: `supported_deployment_resources_types`, it could serve online predictions by using `Endpoint.predict()` or `Endpoint.explain()`. """ + self._assert_gca_resource_is_available() return list(self._gca_resource.supported_input_storage_formats) @property @@ -1363,12 +1369,14 @@ def supported_output_storage_formats(self) -> List[str]: `supported_deployment_resources_types`, it could serve online predictions by using `Endpoint.predict()` or `Endpoint.explain()`. """ + self._assert_gca_resource_is_available() return list(self._gca_resource.supported_output_storage_formats) @property def predict_schemata(self) -> Optional[aiplatform.gapic.PredictSchemata]: """The schemata that describe formats of the Model's predictions and explanations, if available.""" + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "predict_schemata") @property @@ -1379,6 +1387,7 @@ def training_job(self) -> Optional["aiplatform.training_jobs._TrainingJob"]: api_core.exceptions.NotFound: If the Model's training job resource cannot be found on the Vertex service. """ + self._assert_gca_resource_is_available() job_name = getattr(self._gca_resource, "training_pipeline") if not job_name: @@ -1400,6 +1409,7 @@ def training_job(self) -> Optional["aiplatform.training_jobs._TrainingJob"]: def container_spec(self) -> Optional[aiplatform.gapic.ModelContainerSpec]: """The specification of the container that is to be used when deploying this Model. Not present for AutoML Models.""" + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "container_spec") def __init__( diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index 7d252725ef..29a31a3ced 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -220,6 +220,18 @@ def __init__( ), ) + def _assert_gca_resource_is_available(self) -> None: + # TODO(b/193800063) Change this to name after this fix + if not getattr(self._gca_resource, "create_time", None): + raise RuntimeError( + f"{self.__class__.__name__} resource has not been created." + + ( + f" Resource failed with: {self._exception}" + if self._exception + else "" + ) + ) + @base.optional_sync() def run( self, @@ -236,6 +248,7 @@ def run( network (str): Optional. The full name of the Compute Engine network to which the job should be peered. For example, projects/12345/global/networks/myVPC. + Private services access must already be configured for the network. If left unspecified, the job is not peered with any network. 
sync (bool): @@ -272,17 +285,9 @@ def pipeline_spec(self): @property def state(self) -> Optional[gca_pipeline_state_v1beta1.PipelineState]: """Current pipeline state.""" - if not self._has_run: - raise RuntimeError("Job has not run. No state available.") - self._sync_gca_resource() return self._gca_resource.state - @property - def _has_run(self) -> bool: - """Helper property to check if this pipeline job has been run.""" - return bool(self._gca_resource.create_time) - @property def has_failed(self) -> bool: """Returns True if pipeline has failed. @@ -300,10 +305,6 @@ def _dashboard_uri(self) -> str: url = f"https://console.cloud.google.com/vertex-ai/locations/{fields.location}/pipelines/runs/{fields.id}?project={fields.project}" return url - def _sync_gca_resource(self): - """Helper method to sync the local gca_source against the service.""" - self._gca_resource = self.api_client.get_pipeline_job(name=self.resource_name) - def _block_until_complete(self): """Helper method to block and check on job until complete.""" # Used these numbers so failures surface fast @@ -377,13 +378,9 @@ def cancel(self) -> None: makes a best effort to cancel the job, but success is not guaranteed. On successful cancellation, the PipelineJob is not deleted; instead it becomes a job with state set to `CANCELLED`. - - Raises: - RuntimeError: If this PipelineJob has not started running. """ - if not self._has_run: - raise RuntimeError( - "This PipelineJob has not been launched, use the `run()` method " - "to start. `cancel()` can only be called on a job that is running." - ) self.api_client.cancel_pipeline_job(name=self.resource_name) + + def wait_for_resource_creation(self) -> None: + """Waits until resource has been created.""" + self._wait_for_resource_creation() diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 48cc8ef035..5f9d7c3445 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -767,12 +767,6 @@ def _dashboard_uri(self) -> str: url = f"https://console.cloud.google.com/ai/platform/locations/{fields.location}/training/{fields.id}?project={fields.project}" return url - def _sync_gca_resource(self): - """Helper method to sync the local gca_source against the service.""" - self._gca_resource = self.api_client.get_training_pipeline( - name=self.resource_name - ) - @property def _has_run(self) -> bool: """Helper property to check if this training job has been run.""" @@ -859,6 +853,10 @@ def cancel(self) -> None: ) self.api_client.cancel_training_pipeline(name=self.resource_name) + def wait_for_resource_creation(self) -> None: + """Waits until resource has been created.""" + self._wait_for_resource_creation() + class _CustomTrainingJob(_TrainingJob): """ABC for Custom Training Pipelines..""" @@ -1103,6 +1101,7 @@ def network(self) -> Optional[str]: unspecified, the CustomTrainingJob is not peered with any network. 
""" # Return `network` value in training task inputs if set in Map + self._assert_gca_resource_is_available() return self._gca_resource.training_task_inputs.get("network") def _prepare_and_validate_run( diff --git a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py index b3649fa57b..d7b2e85001 100644 --- a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py @@ -113,7 +113,7 @@ _TEST_MODEL_NAME = "projects/my-project/locations/us-central1/models/12345" _TEST_PIPELINE_RESOURCE_NAME = ( - "projects/my-project/locations/us-central1/trainingPipeline/12345" + "projects/my-project/locations/us-central1/trainingPipelines/12345" ) diff --git a/tests/unit/aiplatform/test_automl_image_training_jobs.py b/tests/unit/aiplatform/test_automl_image_training_jobs.py index 18349136e9..29ce61a8a1 100644 --- a/tests/unit/aiplatform/test_automl_image_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_image_training_jobs.py @@ -76,7 +76,7 @@ ) _TEST_PIPELINE_RESOURCE_NAME = ( - f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipeline/12345" + f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipelines/12345" ) # CMEK encryption diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py index 413566440f..78a99ee6e3 100644 --- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py @@ -131,7 +131,7 @@ _TEST_MODEL_NAME = "projects/my-project/locations/us-central1/models/12345" _TEST_PIPELINE_RESOURCE_NAME = ( - "projects/my-project/locations/us-central1/trainingPipeline/12345" + "projects/my-project/locations/us-central1/trainingPipelines/12345" ) # CMEK encryption @@ -164,6 +164,15 @@ def mock_pipeline_service_create(): yield mock_create_training_pipeline +@pytest.fixture +def mock_pipeline_service_create_fail(): + with mock.patch.object( + pipeline_service_client.PipelineServiceClient, "create_training_pipeline" + ) as mock_create_training_pipeline: + mock_create_training_pipeline.side_effect = RuntimeError("Mock fail") + yield mock_create_training_pipeline + + @pytest.fixture def mock_pipeline_service_get(): with mock.patch.object( @@ -305,6 +314,10 @@ def test_run_call_pipeline_service_create( sync=sync, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + if not sync: model_from_job.wait() @@ -386,6 +399,10 @@ def test_run_call_pipeline_if_no_model_display_name( disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + if not sync: model_from_job.wait() @@ -457,6 +474,10 @@ def test_run_call_pipeline_service_create_if_no_column_transformations( sync=sync, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + if not sync: model_from_job.wait() @@ -534,6 +555,10 @@ def test_run_call_pipeline_service_create_if_set_additional_experiments( sync=sync, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + if not sync: model_from_job.wait() @@ -784,6 +809,10 @@ def test_run_called_twice_raises(self, mock_dataset_tabular, sync): sync=sync, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + with 
pytest.raises(RuntimeError): job.run( dataset=mock_dataset_tabular, @@ -828,6 +857,116 @@ def test_run_raises_if_pipeline_fails( with pytest.raises(RuntimeError): job.get_model() + def test_wait_for_resource_creation_does_not_fail_if_creation_does_not_fail( + self, mock_pipeline_service_create_and_get_with_fail, mock_dataset_tabular + ): + + aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME) + + job = training_jobs.AutoMLTabularTrainingJob( + display_name=_TEST_DISPLAY_NAME, + optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS, + optimization_objective_recall_value=None, + optimization_objective_precision_value=None, + ) + + job.run( + model_display_name=_TEST_MODEL_DISPLAY_NAME, + dataset=mock_dataset_tabular, + target_column=_TEST_TRAINING_TARGET_COLUMN, + training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + sync=False, + ) + + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + + with pytest.raises(RuntimeError): + job.wait() + + with pytest.raises(RuntimeError): + job.get_model() + + @pytest.mark.usefixtures("mock_pipeline_service_create_fail") + @pytest.mark.parametrize("sync", [True, False]) + def test_create_fails(self, mock_dataset_tabular, sync): + + aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME) + + job = training_jobs.AutoMLTabularTrainingJob( + display_name=_TEST_DISPLAY_NAME, + optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS, + optimization_objective_recall_value=None, + optimization_objective_precision_value=None, + ) + + if sync: + with pytest.raises(RuntimeError) as e: + job.run( + model_display_name=_TEST_MODEL_DISPLAY_NAME, + dataset=mock_dataset_tabular, + target_column=_TEST_TRAINING_TARGET_COLUMN, + training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + sync=sync, + ) + assert e.match("Mock fail") + + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created." + ) + + with pytest.raises(RuntimeError) as e: + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created." + ) + + job.wait() + + with pytest.raises(RuntimeError) as e: + job.get_model() + e.match( + regexp="TrainingPipeline has not been launched. You must run this TrainingPipeline using TrainingPipeline.run." 
+ ) + + else: + job.run( + model_display_name=_TEST_MODEL_DISPLAY_NAME, + dataset=mock_dataset_tabular, + target_column=_TEST_TRAINING_TARGET_COLUMN, + training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + sync=sync, + ) + + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match(regexp=r"Mock fail") + + with pytest.raises(RuntimeError) as e: + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created. Resource failed with: Mock fail" + ) + + with pytest.raises(RuntimeError): + job.wait() + + with pytest.raises(RuntimeError): + job.get_model() + def test_raises_before_run_is_called(self, mock_pipeline_service_create): aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME) @@ -848,3 +987,59 @@ def test_raises_before_run_is_called(self, mock_pipeline_service_create): with pytest.raises(RuntimeError): job.state + + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created." + ) + + def test_properties_throw_if_not_available(self): + + job = training_jobs.AutoMLTabularTrainingJob( + display_name=_TEST_DISPLAY_NAME, + optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + ) + + with pytest.raises(RuntimeError) as e: + job.name + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) + + with pytest.raises(RuntimeError) as e: + job.resource_name + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) + + with pytest.raises(RuntimeError) as e: + job.display_name + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) + + with pytest.raises(RuntimeError) as e: + job.create_time + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) + + with pytest.raises(RuntimeError) as e: + job.encryption_spec + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) + + with pytest.raises(RuntimeError) as e: + job.labels + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) + + with pytest.raises(RuntimeError) as e: + job.gca_resource + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) diff --git a/tests/unit/aiplatform/test_automl_text_training_jobs.py b/tests/unit/aiplatform/test_automl_text_training_jobs.py index 3569b8c6fe..4d7cd60527 100644 --- a/tests/unit/aiplatform/test_automl_text_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_text_training_jobs.py @@ -61,7 +61,7 @@ ) _TEST_PIPELINE_RESOURCE_NAME = ( - f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipeline/12345" + f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipelines/12345" ) # CMEK encryption diff --git a/tests/unit/aiplatform/test_automl_video_training_jobs.py b/tests/unit/aiplatform/test_automl_video_training_jobs.py index 651bd9a388..b3087d0eed 100644 --- a/tests/unit/aiplatform/test_automl_video_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_video_training_jobs.py @@ -57,7 +57,7 @@ ) _TEST_PIPELINE_RESOURCE_NAME = ( - f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipeline/12345" + 
f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipelines/12345" ) # CMEK encryption diff --git a/tests/unit/aiplatform/test_custom_job.py b/tests/unit/aiplatform/test_custom_job.py index 7797e0edef..de144d5241 100644 --- a/tests/unit/aiplatform/test_custom_job.py +++ b/tests/unit/aiplatform/test_custom_job.py @@ -162,6 +162,11 @@ def get_custom_job_mock_with_fail(): state=gca_job_state_compat.JobState.JOB_STATE_FAILED, error=status_pb2.Status(message="Test Error"), ), + _get_custom_job_proto( + name=_TEST_CUSTOM_JOB_NAME, + state=gca_job_state_compat.JobState.JOB_STATE_FAILED, + error=status_pb2.Status(message="Test Error"), + ), ] yield get_custom_job_mock @@ -178,6 +183,15 @@ def create_custom_job_mock(): yield create_custom_job_mock +@pytest.fixture +def create_custom_job_mock_fail(): + with mock.patch.object( + job_service_client.JobServiceClient, "create_custom_job" + ) as create_custom_job_mock: + create_custom_job_mock.side_effect = RuntimeError("Mock fail") + yield create_custom_job_mock + + @pytest.fixture def create_custom_job_v1beta1_mock(): with mock.patch.object( @@ -221,6 +235,10 @@ def test_create_custom_job(self, create_custom_job_mock, get_custom_job_mock, sy sync=sync, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_CUSTOM_JOB_NAME + job.wait() expected_custom_job = _get_custom_job_proto() @@ -233,6 +251,7 @@ def test_create_custom_job(self, create_custom_job_mock, get_custom_job_mock, sy assert ( job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED ) + assert job.network == _TEST_NETWORK @pytest.mark.parametrize("sync", [True, False]) def test_run_custom_job_with_fail_raises( @@ -249,6 +268,10 @@ def test_run_custom_job_with_fail_raises( display_name=_TEST_DISPLAY_NAME, worker_pool_specs=_TEST_WORKER_POOL_SPEC ) + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match(r"CustomJob resource is not scheduled to be created.") + with pytest.raises(RuntimeError): job.run( service_account=_TEST_SERVICE_ACCOUNT, @@ -260,6 +283,10 @@ def test_run_custom_job_with_fail_raises( job.wait() + # shouldn't fail + job.wait_for_resource_creation() + assert job.resource_name == _TEST_CUSTOM_JOB_NAME + expected_custom_job = _get_custom_job_proto() create_custom_job_mock.assert_called_once_with( @@ -267,7 +294,44 @@ def test_run_custom_job_with_fail_raises( ) assert job.job_spec == expected_custom_job.job_spec - assert job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_FAILED + assert job.state == gca_job_state_compat.JobState.JOB_STATE_FAILED + + @pytest.mark.usefixtures("create_custom_job_mock_fail") + def test_run_custom_job_with_fail_at_creation(self): + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + staging_bucket=_TEST_STAGING_BUCKET, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = aiplatform.CustomJob( + display_name=_TEST_DISPLAY_NAME, worker_pool_specs=_TEST_WORKER_POOL_SPEC + ) + + job.run( + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + timeout=_TEST_TIMEOUT, + restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + sync=False, + ) + + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match("Mock fail") + + with pytest.raises(RuntimeError) as e: + job.resource_name + assert e.match( + "CustomJob resource has not been created. 
Resource failed with: Mock fail" + ) + + with pytest.raises(RuntimeError) as e: + job.network + assert e.match( + "CustomJob resource has not been created. Resource failed with: Mock fail" + ) def test_custom_job_get_state_raises_without_run(self): aiplatform.init( diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index 25b8f27b63..3457ccc7bd 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -115,19 +115,29 @@ _TEST_DATASET_LIST = [ gca_dataset.Dataset( - display_name="a", metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR + name=_TEST_NAME, + display_name="a", + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR, ), gca_dataset.Dataset( - display_name="d", metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR + name=_TEST_NAME, + display_name="d", + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, ), gca_dataset.Dataset( - display_name="b", metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR + name=_TEST_NAME, + display_name="b", + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR, ), gca_dataset.Dataset( - display_name="e", metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TEXT + name=_TEST_NAME, + display_name="e", + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TEXT, ), gca_dataset.Dataset( - display_name="c", metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR + name=_TEST_NAME, + display_name="c", + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR, ), ] @@ -299,6 +309,15 @@ def create_dataset_mock(): yield create_dataset_mock +@pytest.fixture +def create_dataset_mock_fail(): + with patch.object( + dataset_service_client.DatasetServiceClient, "create_dataset" + ) as create_dataset_mock: + create_dataset_mock.side_effect = RuntimeError("Mock fail") + yield create_dataset_mock + + @pytest.fixture def delete_dataset_mock(): with mock.patch.object( @@ -321,6 +340,15 @@ def import_data_mock(): yield import_data_mock +@pytest.fixture +def import_data_mock_fail(): + with patch.object( + dataset_service_client.DatasetServiceClient, "import_data" + ) as import_data_mock: + import_data_mock.side_effect = RuntimeError("Mock fail") + yield import_data_mock + + @pytest.fixture def export_data_mock(): with patch.object( @@ -955,6 +983,8 @@ def test_create_dataset_with_default_encryption_key( if not sync: my_dataset.wait() + assert my_dataset.metadata_schema_uri == _TEST_METADATA_SCHEMA_URI_TABULAR + expected_dataset = gca_dataset.Dataset( display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR, @@ -968,6 +998,32 @@ def test_create_dataset_with_default_encryption_key( metadata=_TEST_REQUEST_METADATA, ) + @pytest.mark.usefixtures("create_dataset_mock_fail") + def test_create_dataset_fail(self): + aiplatform.init( + project=_TEST_PROJECT, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + ) + + my_dataset = datasets.TabularDataset.create( + display_name=_TEST_DISPLAY_NAME, bq_source=_TEST_SOURCE_URI_BQ, sync=False, + ) + + with pytest.raises(RuntimeError) as e: + my_dataset.wait() + assert e.match(regexp=r"Mock fail") + + with pytest.raises(RuntimeError) as e: + my_dataset.metadata_schema_uri + assert e.match( + regexp=r"TabularDataset resource has not been created. Resource failed with: Mock fail" + ) + + with pytest.raises(RuntimeError) as e: + my_dataset.column_names + assert e.match( + regexp=r"TabularDataset resource has not been created. 
Resource failed with: Mock fail" + ) + @pytest.mark.usefixtures("get_dataset_tabular_bq_mock") @pytest.mark.parametrize("sync", [True, False]) def test_create_dataset(self, create_dataset_mock, sync): @@ -1198,6 +1254,41 @@ def test_create_and_import_dataset( expected_dataset.name = _TEST_NAME assert my_dataset._gca_resource == expected_dataset + @pytest.mark.usefixtures( + "create_dataset_mock", "get_dataset_text_mock", "import_data_mock_fail" + ) + @pytest.mark.parametrize("sync", [True, False]) + def test_create_then_import_dataset_fails(self, sync): + aiplatform.init(project=_TEST_PROJECT) + + my_dataset = datasets.TextDataset.create( + display_name=_TEST_DISPLAY_NAME, + encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + sync=sync, + ) + + if sync: + + with pytest.raises(RuntimeError) as e: + my_dataset.import_data( + gcs_source=[_TEST_SOURCE_URI_GCS], + import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, + sync=sync, + ) + e.match(regexp=r"Mock fail") + + else: + + my_dataset.import_data( + gcs_source=[_TEST_SOURCE_URI_GCS], + import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, + sync=sync, + ) + + with pytest.raises(RuntimeError) as e: + my_dataset.wait() + e.match(regexp=r"Mock fail") + @pytest.mark.usefixtures("get_dataset_text_mock") @pytest.mark.parametrize("sync", [True, False]) def test_import_data(self, import_data_mock, sync): diff --git a/tests/unit/aiplatform/test_endpoints.py b/tests/unit/aiplatform/test_endpoints.py index e9f7de971a..0ae76ea988 100644 --- a/tests/unit/aiplatform/test_endpoints.py +++ b/tests/unit/aiplatform/test_endpoints.py @@ -147,13 +147,19 @@ _TEST_ENDPOINT_LIST = [ gca_endpoint.Endpoint( - display_name="aac", create_time=datetime.now() - timedelta(minutes=15) + name=_TEST_ENDPOINT_NAME, + display_name="aac", + create_time=datetime.now() - timedelta(minutes=15), ), gca_endpoint.Endpoint( - display_name="aab", create_time=datetime.now() - timedelta(minutes=5) + name=_TEST_ENDPOINT_NAME, + display_name="aab", + create_time=datetime.now() - timedelta(minutes=5), ), gca_endpoint.Endpoint( - display_name="aaa", create_time=datetime.now() - timedelta(minutes=10) + name=_TEST_ENDPOINT_NAME, + display_name="aaa", + create_time=datetime.now() - timedelta(minutes=10), ), ] @@ -487,7 +493,23 @@ def test_create(self, create_endpoint_mock, sync): ) expected_endpoint.name = _TEST_ENDPOINT_NAME - assert my_endpoint._gca_resource == expected_endpoint + assert my_endpoint.gca_resource == expected_endpoint + assert my_endpoint.network is None + + @pytest.mark.usefixtures("get_endpoint_mock") + def test_accessing_properties_with_no_resource_raises(self,): + + my_endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME) + + my_endpoint._gca_resource = None + + with pytest.raises(RuntimeError) as e: + my_endpoint.gca_resource + e.match(regexp=r"Endpoint resource has not been created.") + + with pytest.raises(RuntimeError) as e: + my_endpoint.network + e.match(regexp=r"Endpoint resource has not been created.") @pytest.mark.usefixtures("get_endpoint_mock") @pytest.mark.parametrize("sync", [True, False]) diff --git a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py index f4102fc3bb..e2d716e729 100644 --- a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py +++ b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py @@ -169,6 +169,10 @@ def get_hyperparameter_tuning_job_mock(): name=_TEST_HYPERPARAMETERTUNING_JOB_NAME, state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED, ), + 
_get_hyperparameter_tuning_job_proto( + name=_TEST_HYPERPARAMETERTUNING_JOB_NAME, + state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED, + ), ] yield get_hyperparameter_tuning_job_mock @@ -208,6 +212,15 @@ def create_hyperparameter_tuning_job_mock(): yield create_hyperparameter_tuning_job_mock +@pytest.fixture +def create_hyperparameter_tuning_job_mock_fail(): + with mock.patch.object( + job_service_client.JobServiceClient, "create_hyperparameter_tuning_job" + ) as create_hyperparameter_tuning_job_mock: + create_hyperparameter_tuning_job_mock.side_effect = RuntimeError("Mock fail") + yield create_hyperparameter_tuning_job_mock + + @pytest.fixture def create_hyperparameter_tuning_job_v1beta1_mock(): with mock.patch.object( @@ -287,9 +300,9 @@ def test_create_hyperparameter_tuning_job( hyperparameter_tuning_job=expected_hyperparameter_tuning_job, ) - assert ( - job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED - ) + assert job.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED + assert job.network == _TEST_NETWORK + assert job.trials == [] @pytest.mark.parametrize("sync", [True, False]) def test_run_hyperparameter_tuning_job_with_fail_raises( @@ -351,6 +364,71 @@ def test_run_hyperparameter_tuning_job_with_fail_raises( assert job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_FAILED + @pytest.mark.usefixtures("create_hyperparameter_tuning_job_mock_fail") + def test_run_hyperparameter_tuning_job_with_fail_at_creation(self): + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + staging_bucket=_TEST_STAGING_BUCKET, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + custom_job = aiplatform.CustomJob( + display_name=test_custom_job._TEST_DISPLAY_NAME, + worker_pool_specs=test_custom_job._TEST_WORKER_POOL_SPEC, + ) + + job = aiplatform.HyperparameterTuningJob( + display_name=_TEST_DISPLAY_NAME, + custom_job=custom_job, + metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE}, + parameter_spec={ + "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"), + "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"), + "activation": hpt.CategoricalParameterSpec( + values=["relu", "sigmoid", "elu", "selu", "tanh"] + ), + "batch_size": hpt.DiscreteParameterSpec( + values=[16, 32], scale="linear" + ), + }, + parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT, + max_trial_count=_TEST_MAX_TRIAL_COUNT, + max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT, + search_algorithm=_TEST_SEARCH_ALGORITHM, + measurement_selection=_TEST_MEASUREMENT_SELECTION, + ) + + job.run( + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + timeout=_TEST_TIMEOUT, + restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + sync=False, + ) + + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match("Mock fail") + + with pytest.raises(RuntimeError) as e: + job.resource_name + assert e.match( + "HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail" + ) + + with pytest.raises(RuntimeError) as e: + job.network + assert e.match( + "HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail" + ) + + with pytest.raises(RuntimeError) as e: + job.trials + assert e.match( + "HyperparameterTuningJob resource has not been created. 
Resource failed with: Mock fail" + ) + def test_hyperparameter_tuning_job_get_state_raises_without_run(self): aiplatform.init( project=_TEST_PROJECT, diff --git a/tests/unit/aiplatform/test_jobs.py b/tests/unit/aiplatform/test_jobs.py index acc7317ebb..76584cd0c4 100644 --- a/tests/unit/aiplatform/test_jobs.py +++ b/tests/unit/aiplatform/test_jobs.py @@ -240,6 +240,15 @@ def create_batch_prediction_job_mock(): yield create_batch_prediction_job_mock +@pytest.fixture +def create_batch_prediction_job_mock_fail(): + with mock.patch.object( + job_service_client.JobServiceClient, "create_batch_prediction_job" + ) as create_batch_prediction_job_mock: + create_batch_prediction_job_mock.side_effect = RuntimeError("Mock fail") + yield create_batch_prediction_job_mock + + @pytest.fixture def create_batch_prediction_job_with_explanations_mock(): with mock.patch.object( @@ -472,6 +481,11 @@ def test_batch_predict_gcs_source_bq_dest( if not sync: batch_prediction_job.wait() + assert ( + batch_prediction_job.output_info + == gca_batch_prediction_job.BatchPredictionJob.OutputInfo() + ) + # Construct expected request expected_gapic_batch_prediction_job = gca_batch_prediction_job.BatchPredictionJob( display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, @@ -562,6 +576,46 @@ def test_batch_predict_with_all_args( batch_prediction_job=expected_gapic_batch_prediction_job, ) + @pytest.mark.usefixtures("create_batch_prediction_job_mock_fail") + def test_batch_predict_create_fails(self): + aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) + + batch_prediction_job = jobs.BatchPredictionJob.create( + model_name=_TEST_MODEL_NAME, + job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, + gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, + bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX, + sync=False, + ) + + with pytest.raises(RuntimeError) as e: + batch_prediction_job.wait() + assert e.match(regexp=r"Mock fail") + + with pytest.raises(RuntimeError) as e: + batch_prediction_job.output_info + assert e.match( + regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail" + ) + + with pytest.raises(RuntimeError) as e: + batch_prediction_job.partial_failures + assert e.match( + regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail" + ) + + with pytest.raises(RuntimeError) as e: + batch_prediction_job.completion_stats + assert e.match( + regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail" + ) + + with pytest.raises(RuntimeError) as e: + batch_prediction_job.iter_outputs() + assert e.match( + regexp=r"BatchPredictionJob resource has not been created. 
Resource failed with: Mock fail" + ) + @pytest.mark.usefixtures("get_batch_prediction_job_mock") def test_batch_predict_no_source(self, create_batch_prediction_job_mock): aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index be4f7f61bd..600b880d14 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -255,6 +255,8 @@ def get_model_with_custom_project_mock(): get_model_mock.return_value = gca_model.Model( display_name=_TEST_MODEL_NAME, name=_TEST_MODEL_RESOURCE_NAME_CUSTOM_PROJECT, + artifact_uri=_TEST_ARTIFACT_URI, + description=_TEST_DESCRIPTION, ) yield get_model_mock @@ -726,6 +728,47 @@ def test_upload_uploads_and_gets_model_with_custom_project( name=test_model_resource_name ) + assert my_model.uri == _TEST_ARTIFACT_URI + assert my_model.supported_export_formats == {} + assert my_model.supported_deployment_resources_types == [] + assert my_model.supported_input_storage_formats == [] + assert my_model.supported_output_storage_formats == [] + assert my_model.description == _TEST_DESCRIPTION + + @pytest.mark.usefixtures("get_model_with_custom_project_mock") + def test_accessing_properties_with_no_resource_raises(self,): + + test_model_resource_name = model_service_client.ModelServiceClient.model_path( + _TEST_PROJECT_2, _TEST_LOCATION, _TEST_ID + ) + + my_model = models.Model(test_model_resource_name) + my_model._gca_resource = None + + with pytest.raises(RuntimeError) as e: + my_model.uri + e.match(regexp=r"Model resource has not been created.") + + with pytest.raises(RuntimeError) as e: + my_model.supported_export_formats + e.match(regexp=r"Model resource has not been created.") + + with pytest.raises(RuntimeError) as e: + my_model.supported_deployment_resources_types + e.match(regexp=r"Model resource has not been created.") + + with pytest.raises(RuntimeError) as e: + my_model.supported_input_storage_formats + e.match(regexp=r"Model resource has not been created.") + + with pytest.raises(RuntimeError) as e: + my_model.supported_output_storage_formats + e.match(regexp=r"Model resource has not been created.") + + with pytest.raises(RuntimeError) as e: + my_model.description + e.match(regexp=r"Model resource has not been created.") + @pytest.mark.usefixtures("get_model_with_custom_location_mock") @pytest.mark.parametrize("sync", [True, False]) def test_upload_uploads_and_gets_model_with_custom_location( diff --git a/tests/unit/aiplatform/test_pipeline_jobs.py b/tests/unit/aiplatform/test_pipeline_jobs.py index ae740b1bcc..1f1d5c96de 100644 --- a/tests/unit/aiplatform/test_pipeline_jobs.py +++ b/tests/unit/aiplatform/test_pipeline_jobs.py @@ -299,7 +299,7 @@ def test_cancel_pipeline_job_without_running( with pytest.raises(RuntimeError) as e: job.cancel() - assert e.match(regexp=r"PipelineJob has not been launched") + assert e.match(regexp=r"PipelineJob resource has not been created") @pytest.mark.usefixtures( "mock_pipeline_service_create", diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py index 0995e0cb95..72c17dedc5 100644 --- a/tests/unit/aiplatform/test_training_jobs.py +++ b/tests/unit/aiplatform/test_training_jobs.py @@ -141,7 +141,7 @@ _TEST_MODEL_NAME = "projects/my-project/locations/us-central1/models/12345" _TEST_PIPELINE_RESOURCE_NAME = ( - "projects/my-project/locations/us-central1/trainingPipeline/12345" + "projects/my-project/locations/us-central1/trainingPipelines/12345" ) 
_TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials())
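
For readers skimming the patch, the sketch below (not part of the diff) illustrates how the newly added wait_for_resource_creation() and the more informative property errors are intended to be used. The project, bucket, script path, and container image are placeholder values, and the printed messages paraphrase the RuntimeError text added in base.py.

    # Illustrative usage sketch; placeholder project/bucket/script/container values.
    from google.cloud import aiplatform

    aiplatform.init(project="my-project", staging_bucket="gs://my-bucket")

    job = aiplatform.CustomTrainingJob(
        display_name="example-job",
        script_path="task.py",
        container_uri="gcr.io/cloud-aiplatform/training/tf-cpu.2-4:latest",
    )

    # Before run() is called, properties now raise an informative RuntimeError
    # (via _assert_gca_resource_is_available) instead of surfacing an empty proto.
    try:
        job.resource_name
    except RuntimeError as e:
        print(e)  # e.g. "CustomTrainingJob resource has not been created"

    job.run(sync=False)

    # Blocks only until the backing TrainingPipeline resource exists; if creation
    # failed asynchronously, the underlying exception is re-raised here.
    job.wait_for_resource_creation()
    print(job.resource_name)

    job.wait()  # blocks until the full training run completes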