From f9aecd22fe08a97e45187b4d11c755ac3b9dfadd Mon Sep 17 00:00:00 2001
From: Karl Weinmeister <11586922+kweinmeister@users.noreply.github.com>
Date: Fri, 10 Dec 2021 12:31:20 -0600
Subject: [PATCH] docs: Updated docstrings with exception error classes (#894)

Co-authored-by: sasha-gitg <44654632+sasha-gitg@users.noreply.github.com>
---
 google/cloud/aiplatform/base.py | 8 ++++----
 google/cloud/aiplatform/datasets/_datasources.py | 4 ++--
 google/cloud/aiplatform/datasets/dataset.py | 2 +-
 .../metadata/tf/v1/saved_model_metadata_builder.py | 2 +-
 .../metadata/tf/v2/saved_model_metadata_builder.py | 6 +++---
 google/cloud/aiplatform/jobs.py | 4 ++--
 google/cloud/aiplatform/metadata/metadata.py | 6 +++---
 google/cloud/aiplatform/metadata/resource.py | 2 +-
 google/cloud/aiplatform/models.py | 6 +++---
 google/cloud/aiplatform/training_jobs.py | 4 ++--
 google/cloud/aiplatform/utils/featurestore_utils.py | 4 ++--
 google/cloud/aiplatform/utils/tensorboard_utils.py | 2 +-
 12 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py
index 2ee4bf4635..d572913a25 100644
--- a/google/cloud/aiplatform/base.py
+++ b/google/cloud/aiplatform/base.py
@@ -504,7 +504,7 @@ def _get_and_validate_project_location(
             location(str): The location of the resource noun.
 
         Raises:
-            RuntimeError if location is different from resource location
+            RuntimeError: If location is different from resource location
         """
 
         fields = utils.extract_fields_from_resource_name(
@@ -604,7 +604,7 @@ def _assert_gca_resource_is_available(self) -> None:
         """Helper method to raise when property is not accessible.
 
         Raises:
-            RuntimeError if _gca_resource is has not been created.
+            RuntimeError: If _gca_resource is has not been created.
         """
         if self._gca_resource is None:
             raise RuntimeError(
@@ -1115,7 +1115,7 @@ def _wait_for_resource_creation(self) -> None:
             job.run(sync=False, ...)
             job._wait_for_resource_creation()
         Raises:
-            RuntimeError if the resource has not been scheduled to be created.
+            RuntimeError: If the resource has not been scheduled to be created.
         """
 
         # If the user calls this but didn't actually invoke an API to create
@@ -1141,7 +1141,7 @@ def _assert_gca_resource_is_available(self) -> None:
             resource creation has failed asynchronously.
 
         Raises:
-            RuntimeError when resource has not been created.
+            RuntimeError: When resource has not been created.
         """
         if not getattr(self._gca_resource, "name", None):
             raise RuntimeError(
diff --git a/google/cloud/aiplatform/datasets/_datasources.py b/google/cloud/aiplatform/datasets/_datasources.py
index 9323f40382..2ca2c02bfd 100644
--- a/google/cloud/aiplatform/datasets/_datasources.py
+++ b/google/cloud/aiplatform/datasets/_datasources.py
@@ -71,7 +71,7 @@ def __init__(
                 "bq://project.dataset.table_name"
 
         Raises:
-            ValueError if source configuration is not valid.
+            ValueError: If source configuration is not valid.
         """
 
         dataset_metadata = None
@@ -215,7 +215,7 @@ def create_datasource(
         datasource (Datasource)
 
     Raises:
-        ValueError when below scenarios happen
+        ValueError: When below scenarios happen:
         - import_schema_uri is identified for creating TabularDatasource
         - either import_schema_uri or gcs_source is missing for creating NonTabularDatasourceImportable
     """
diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py
index 5e5de0058b..cdb769a8b2 100644
--- a/google/cloud/aiplatform/datasets/dataset.py
+++ b/google/cloud/aiplatform/datasets/dataset.py
@@ -91,7 +91,7 @@ def _validate_metadata_schema_uri(self) -> None:
         """Validate the metadata_schema_uri of retrieved dataset resource.
 
         Raises:
-            ValueError if the dataset type of the retrieved dataset resource is
+            ValueError: If the dataset type of the retrieved dataset resource is
             not supported by the class.
         """
         if self._supported_metadata_schema_uris and (
diff --git a/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py b/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py
index 6f0af6d93b..b7ffed4802 100644
--- a/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py
+++ b/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py
@@ -50,7 +50,7 @@ def __init__(
                 signature_name) specifies multiple outputs.
 
         Raises:
-            ValueError if outputs_to_explain contains more than 1 element or
+            ValueError: If outputs_to_explain contains more than 1 element or
             signature contains multiple outputs.
         """
         if outputs_to_explain:
diff --git a/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py b/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py
index dd7f2b8d0a..7eb19386b4 100644
--- a/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py
+++ b/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py
@@ -49,8 +49,8 @@ def __init__(
                 Any keyword arguments to be passed to tf.saved_model.save() function.
 
         Raises:
-            ValueError if outputs_to_explain contains more than 1 element.
-            ImportError if tf is not imported.
+            ValueError: If outputs_to_explain contains more than 1 element.
+            ImportError: If tf is not imported.
         """
         if outputs_to_explain and len(outputs_to_explain) > 1:
             raise ValueError(
@@ -91,7 +91,7 @@ def _infer_metadata_entries_from_model(
             Inferred input metadata and output metadata from the model.
 
         Raises:
-            ValueError if specified name is not found in signature outputs.
+            ValueError: If specified name is not found in signature outputs.
         """
 
         loaded_sig = self._loaded_model.signatures[signature_name]
diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py
index ae920ab2f3..1ad70faece 100644
--- a/google/cloud/aiplatform/jobs.py
+++ b/google/cloud/aiplatform/jobs.py
@@ -1049,7 +1049,7 @@ def __init__(
                 staging_bucket set in aiplatform.init.
 
         Raises:
-            RuntimeError is not staging bucket was set using aiplatfrom.init and a staging
+            RuntimeError: If staging bucket was not set using aiplatform.init and a staging
             bucket was not passed in.
         """
 
@@ -1241,7 +1241,7 @@ def from_local_script(
                 staging_bucket set in aiplatform.init.
 
         Raises:
-            RuntimeError is not staging bucket was set using aiplatfrom.init and a staging
+            RuntimeError: If staging bucket was not set using aiplatform.init and a staging
             bucket was not passed in.
         """
 
diff --git a/google/cloud/aiplatform/metadata/metadata.py b/google/cloud/aiplatform/metadata/metadata.py
index 919eff8619..6ba664916e 100644
--- a/google/cloud/aiplatform/metadata/metadata.py
+++ b/google/cloud/aiplatform/metadata/metadata.py
@@ -157,8 +157,8 @@ def log_metrics(self, metrics: Dict[str, Union[float, int]]):
             metrics (Dict):
                 Required. Metrics key/value pairs. Only flot and int are supported format for value.
         Raises:
-            TypeError if value contains unsupported types.
-            ValueError if Experiment or Run is not set.
+            TypeError: If value contains unsupported types.
+            ValueError: If Experiment or Run is not set.
         """
 
         self._validate_experiment_and_run(method_name="log_metrics")
@@ -265,7 +265,7 @@ def _validate_metrics_value_type(metrics: Dict[str, Union[float, int]]):
             metrics (Dict):
                 Required. Metrics key/value pairs. Only flot and int are supported format for value.
         Raises:
-            TypeError if value contains unsupported types.
+            TypeError: If value contains unsupported types.
         """
 
         for key, value in metrics.items():
diff --git a/google/cloud/aiplatform/metadata/resource.py b/google/cloud/aiplatform/metadata/resource.py
index 3ebcaa5112..2727513234 100644
--- a/google/cloud/aiplatform/metadata/resource.py
+++ b/google/cloud/aiplatform/metadata/resource.py
@@ -451,7 +451,7 @@ def _extract_metadata_store_id(resource_name, resource_noun) -> str:
             metadata_store_id (str):
                 The metadata store id for the particular resource name.
         Raises:
-            ValueError if it does not exist.
+            ValueError: If it does not exist.
         """
         pattern = re.compile(
             r"^projects\/(?P[\w-]+)\/locations\/(?P[\w-]+)\/metadataStores\/(?P[\w-]+)\/"
diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py
index 17ddc5c70d..6aca4f8c27 100644
--- a/google/cloud/aiplatform/models.py
+++ b/google/cloud/aiplatform/models.py
@@ -786,7 +786,7 @@ def _deploy(
                 will be executed in concurrent Future and any downstream object will
                 be immediately returned and synced when the Future has completed.
         Raises:
-            ValueError if there is not current traffic split and traffic percentage
+            ValueError: If there is not current traffic split and traffic percentage
             is not 0 or 100.
         """
         _LOGGER.log_action_start_against_resource(
@@ -2366,9 +2366,9 @@ def export_model(
                 Details of the completed export with output destination paths to the artifacts or container image.
 
         Raises:
-            ValueError if model does not support exporting.
+            ValueError: If model does not support exporting.
 
-            ValueError if invalid arguments or export formats are provided.
+            ValueError: If invalid arguments or export formats are provided.
         """
 
         # Model does not support exporting
diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py
index 4afd4920db..aefcaa9dbc 100644
--- a/google/cloud/aiplatform/training_jobs.py
+++ b/google/cloud/aiplatform/training_jobs.py
@@ -4060,7 +4060,7 @@ def run(
                 produce a Vertex AI Model.
 
         Raises:
-            RuntimeError if Training job has already been run or is waiting to run.
+            RuntimeError: If Training job has already been run or is waiting to run.
         """
 
         if model_display_name:
@@ -4269,7 +4269,7 @@ def _run_with_experiments(
                 produce a Vertex AI Model.
 
         Raises:
-            RuntimeError if Training job has already been run or is waiting to run.
+            RuntimeError: If Training job has already been run or is waiting to run.
         """
 
         if additional_experiments:
diff --git a/google/cloud/aiplatform/utils/featurestore_utils.py b/google/cloud/aiplatform/utils/featurestore_utils.py
index c78a96d185..23f3e48aad 100644
--- a/google/cloud/aiplatform/utils/featurestore_utils.py
+++ b/google/cloud/aiplatform/utils/featurestore_utils.py
@@ -47,7 +47,7 @@ def validate_and_get_entity_type_resource_ids(
         Tuple[str, str] - featurestore ID and entity_type ID
 
     Raises:
-        ValueError if the provided entity_type_name is not in form of a fully-qualified
+        ValueError: If the provided entity_type_name is not in form of a fully-qualified
         entityType resource name nor an entity_type ID with featurestore_id passed.
     """
     match = CompatFeaturestoreServiceClient.parse_entity_type_path(
@@ -91,7 +91,7 @@ def validate_and_get_feature_resource_ids(
         Tuple[str, str, str] - featurestore ID, entity_type ID, and feature ID
 
     Raises:
-        ValueError if the provided feature_name is not in form of a fully-qualified
+        ValueError: If the provided feature_name is not in form of a fully-qualified
         feature resource name nor a feature ID with featurestore_id and entity_type_id
         passed.
     """
diff --git a/google/cloud/aiplatform/utils/tensorboard_utils.py b/google/cloud/aiplatform/utils/tensorboard_utils.py
index d3cb1ef704..acc9aad1ea 100644
--- a/google/cloud/aiplatform/utils/tensorboard_utils.py
+++ b/google/cloud/aiplatform/utils/tensorboard_utils.py
@@ -33,7 +33,7 @@ def _parse_experiment_name(experiment_name: str) -> Dict[str, str]:
         Components of the experiment name.
 
     Raises:
-        ValueError if the experiment_name is invalid.
+        ValueError: If the experiment_name is invalid.
     """
     matched = TensorboardServiceClient.parse_tensorboard_experiment_path(
         experiment_name