diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml new file mode 100644 index 0000000000..d49860b32e --- /dev/null +++ b/.github/.OwlBot.lock.yaml @@ -0,0 +1,4 @@ +docker: + digest: sha256:457583330eec64daa02aeb7a72a04d33e7be2428f646671ce4045dcbc0191b1e + image: gcr.io/repo-automation-bots/owlbot-python:latest + diff --git a/.github/.OwlBot.yaml b/.github/.OwlBot.yaml new file mode 100644 index 0000000000..3abdf695ec --- /dev/null +++ b/.github/.OwlBot.yaml @@ -0,0 +1,26 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +docker: + image: gcr.io/repo-automation-bots/owlbot-python:latest + +deep-remove-regex: + - /owl-bot-staging + +deep-copy-regex: + - source: /google/cloud/aiplatform/(v.*)/.*-py/(.*) + dest: /owl-bot-staging/$1/$2 + +begin-after-commit-hash: 7774246dfb7839067cd64bba0600089b1c91bd85 + diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4f00c7cffc..1bbd787833 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,6 +26,6 @@ repos: hooks: - id: black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.2 + rev: 3.9.1 hooks: - id: flake8 diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py index df5f78f60c..e79dd92ea8 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py @@ -36,6 +36,9 @@ from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_sentiment import ( TextSentimentPredictionResult, ) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.time_series_forecasting import ( + TimeSeriesForecastingPredictionResult, +) from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_action_recognition import ( VideoActionRecognitionPredictionResult, ) @@ -54,6 +57,7 @@ "TabularRegressionPredictionResult", "TextExtractionPredictionResult", "TextSentimentPredictionResult", + "TimeSeriesForecastingPredictionResult", "VideoActionRecognitionPredictionResult", "VideoClassificationPredictionResult", "VideoObjectTrackingPredictionResult", diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py index 866cade4d0..889378a820 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py @@ -22,6 +22,7 @@ from .types.tabular_regression import TabularRegressionPredictionResult from .types.text_extraction import TextExtractionPredictionResult from .types.text_sentiment import TextSentimentPredictionResult +from .types.time_series_forecasting import TimeSeriesForecastingPredictionResult from .types.video_action_recognition import VideoActionRecognitionPredictionResult from .types.video_classification import 
VideoClassificationPredictionResult from .types.video_object_tracking import VideoObjectTrackingPredictionResult @@ -34,6 +35,7 @@ "TabularRegressionPredictionResult", "TextExtractionPredictionResult", "TextSentimentPredictionResult", + "TimeSeriesForecastingPredictionResult", "VideoActionRecognitionPredictionResult", "VideoClassificationPredictionResult", "VideoObjectTrackingPredictionResult", diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py index 0bb99636b3..0980c51fc9 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py @@ -20,6 +20,7 @@ from .tabular_regression import TabularRegressionPredictionResult from .text_extraction import TextExtractionPredictionResult from .text_sentiment import TextSentimentPredictionResult +from .time_series_forecasting import TimeSeriesForecastingPredictionResult from .video_action_recognition import VideoActionRecognitionPredictionResult from .video_classification import VideoClassificationPredictionResult from .video_object_tracking import VideoObjectTrackingPredictionResult @@ -32,6 +33,7 @@ "TabularRegressionPredictionResult", "TextExtractionPredictionResult", "TextSentimentPredictionResult", + "TimeSeriesForecastingPredictionResult", "VideoActionRecognitionPredictionResult", "VideoClassificationPredictionResult", "VideoObjectTrackingPredictionResult", diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py index 38bd8e3c85..96408bd5bc 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import proto # type: ignore @@ -26,21 +24,12 @@ class TimeSeriesForecastingPredictionResult(proto.Message): r"""Prediction output format for Time Series Forecasting. - Attributes: value (float): The regression value. - lower_bound (float): - The lower bound of the prediction interval. - upper_bound (float): - The upper bound of the prediction interval. 
""" - value = proto.Field(proto.FLOAT, number=1) - - lower_bound = proto.Field(proto.FLOAT, number=2) - - upper_bound = proto.Field(proto.FLOAT, number=3) + value = proto.Field(proto.FLOAT, number=1,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py index eae6c5d2fa..fdcb21cae7 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py @@ -69,6 +69,15 @@ from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import ( AutoMlTextSentimentInputs, ) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import ( + AutoMlForecasting, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import ( + AutoMlForecastingInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import ( + AutoMlForecastingMetadata, +) from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import ( AutoMlVideoActionRecognition, ) @@ -110,6 +119,9 @@ "AutoMlTextExtractionInputs", "AutoMlTextSentiment", "AutoMlTextSentimentInputs", + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", "AutoMlVideoActionRecognition", "AutoMlVideoActionRecognitionInputs", "AutoMlVideoClassification", diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py index 16b66c2fb6..1e26be2c1f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py @@ -33,6 +33,9 @@ from .types.automl_text_extraction import AutoMlTextExtractionInputs from .types.automl_text_sentiment import AutoMlTextSentiment from .types.automl_text_sentiment import AutoMlTextSentimentInputs +from .types.automl_time_series_forecasting import AutoMlForecasting +from .types.automl_time_series_forecasting import AutoMlForecastingInputs +from .types.automl_time_series_forecasting import AutoMlForecastingMetadata from .types.automl_video_action_recognition import AutoMlVideoActionRecognition from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs from .types.automl_video_classification import AutoMlVideoClassification @@ -42,6 +45,9 @@ from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig __all__ = ( + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", "AutoMlImageClassification", "AutoMlImageClassificationInputs", "AutoMlImageClassificationMetadata", diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py index d70e297826..56ac9456dc 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py @@ -45,6 +45,11 @@ AutoMlTextSentiment, AutoMlTextSentimentInputs, ) +from .automl_time_series_forecasting import ( + 
AutoMlForecasting, + AutoMlForecastingInputs, + AutoMlForecastingMetadata, +) from .automl_video_action_recognition import ( AutoMlVideoActionRecognition, AutoMlVideoActionRecognitionInputs, ) from .automl_video_classification import ( AutoMlVideoClassification, @@ -78,6 +83,9 @@ "AutoMlTextExtractionInputs", "AutoMlTextSentiment", "AutoMlTextSentimentInputs", + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", "AutoMlVideoActionRecognition", "AutoMlVideoActionRecognitionInputs", "AutoMlVideoClassification", diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py new file mode 100644 index 0000000000..1d3f8d0e3f --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py @@ -0,0 +1,394 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import ( + export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config, +) + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={ + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", + }, +) + + +class AutoMlForecasting(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Forecasting + Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingMetadata): + The metadata information. + """ + + inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlForecastingInputs",) + metadata = proto.Field( + proto.MESSAGE, number=2, message="AutoMlForecastingMetadata", + ) + + +class AutoMlForecastingInputs(proto.Message): + r""" + Attributes: + target_column (str): + The name of the column that the model is to + predict. + time_series_identifier_column (str): + The name of the column that identifies the + time series. + time_column (str): + The name of the column that identifies time + order in the time series. + transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation]): + Each transformation will apply a transform + function to the given input column, and the + result will be used for training. When creating a + transformation for a BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing towards. The + training process creates a model that optimizes the value of + the objective function over the validation set.
+ + The supported optimization objectives: + + - "minimize-rmse" (default) - Minimize root-mean-squared + error (RMSE). + + - "minimize-mae" - Minimize mean-absolute error (MAE). + + - "minimize-rmsle" - Minimize root-mean-squared log error + (RMSLE). + + - "minimize-rmspe" - Minimize root-mean-squared percentage + error (RMSPE). + + - "minimize-wape-mae" - Minimize the combination of + weighted absolute percentage error (WAPE) and + mean-absolute-error (MAE). + + - "minimize-quantile-loss" - Minimize the quantile loss at + the quantiles defined in ``quantiles``. + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + weight_column (str): + Column name that should be used as the weight + column. Higher values in this column give more + importance to the row during model training. The + column must have numeric values between 0 and + 10000 inclusively; 0 means the row is ignored + for training. If the weight column field is not set, + then all rows are assumed to have equal weight + of 1. + time_series_attribute_columns (Sequence[str]): + Column names that should be used as attribute + columns. The value of these columns does not + vary as a function of time. For example, store + ID or item color. + unavailable_at_forecast_columns (Sequence[str]): + Names of columns that are unavailable when a forecast is + requested. This column contains information for the given + entity (identified by the time_series_identifier_column) + that is unknown before the forecast. For example, actual + weather on a given day. + available_at_forecast_columns (Sequence[str]): + Names of columns that are available and provided when a + forecast is requested. These columns contain information for + the given entity (identified by the + time_series_identifier_column column) that is known at + forecast. For example, predicted weather for a specific day. + data_granularity (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Granularity): + Expected difference in time granularity + between rows in the data. + forecast_horizon (int): + The amount of time into the future for which forecasted + values for the target are returned. Expressed in number of + units defined by the ``data_granularity`` field. + context_window (int): + The amount of time into the past that training and prediction + data is used for model training and prediction, respectively. + Expressed in number of units defined by the + ``data_granularity`` field. + export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig): + Configuration for exporting test set + predictions to a BigQuery table. If this + configuration is absent, then the export is not + performed. + quantiles (Sequence[float]): + Quantiles to use for minimize-quantile-loss + ``optimization_objective``.
Up to 5 quantiles are allowed, with + values between 0 and 1, exclusive. Required if the value of + optimization_objective is minimize-quantile-loss. Represents + the percent quantiles to use for that objective. Quantiles + must be unique. + validation_options (str): + Validation options for the data validation component. The + available options are: + + - "fail-pipeline" - default, will validate against the + validation and fail the pipeline if it fails. + + - "ignore-validation" - ignore the results of the + validation and continue. + """ + + class Transformation(proto.Message): + r""" + Attributes: + auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.AutoTransformation): + + numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.NumericTransformation): + + categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.CategoricalTransformation): + + timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TimestampTransformation): + + text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TextTransformation): + + """ + + class AutoTransformation(proto.Message): + r"""Training pipeline will infer the proper transformation based + on the statistics of the dataset. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1,) + + class NumericTransformation(proto.Message): + r"""Training pipeline will perform the following transformation functions. + + - The value converted to float32. + + - The z_score of the value. + + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + + - z_score of log(value+1) when the value is greater than or equal + to 0. Otherwise, this transformation is not applied and the value + is considered a missing value. + + - A boolean value that indicates whether the value is valid. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1,) + + class CategoricalTransformation(proto.Message): + r"""Training pipeline will perform the following transformation functions. + + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category + gets its own special lookup index and resulting embedding. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1,) + + class TimestampTransformation(proto.Message): + r"""Training pipeline will perform the following transformation functions. + + - Apply the transformation functions for Numerical columns. + + - Determine the year, month, day, and weekday. Treat each value from + the timestamp as a Categorical column. + + - Invalid numerical values (for example, values that fall outside + of a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. + + Attributes: + column_name (str): + + time_format (str): + The format in which that time field is expressed.
The + time_format must either be one of: + + - ``unix-seconds`` + + - ``unix-milliseconds`` + + - ``unix-microseconds`` + + - ``unix-nanoseconds`` + + (respectively, the number of seconds, milliseconds, + microseconds, and nanoseconds since the start of the Unix epoch); + + or be written in ``strftime`` syntax. + + If time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = ``"Z"`` + (e.g. 1985-04-12T23:20:50.52Z). + """ + + column_name = proto.Field(proto.STRING, number=1,) + time_format = proto.Field(proto.STRING, number=2,) + + class TextTransformation(proto.Message): + r"""Training pipeline will perform the following transformation functions. + + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1,) + + auto = proto.Field( + proto.MESSAGE, + number=1, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.AutoTransformation", + ) + numeric = proto.Field( + proto.MESSAGE, + number=2, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.NumericTransformation", + ) + categorical = proto.Field( + proto.MESSAGE, + number=3, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.CategoricalTransformation", + ) + timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.TimestampTransformation", + ) + text = proto.Field( + proto.MESSAGE, + number=5, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.TextTransformation", + ) + + class Granularity(proto.Message): + r"""A duration of time expressed in time granularity units. + Attributes: + unit (str): + The time granularity unit of this time period. The supported + units are: + + - "minute" + + - "hour" + + - "day" + + - "week" + + - "month" + + - "year". + quantity (int): + The number of granularity_units between data points in the + training data. If ``granularity_unit`` is ``minute``, can be + 1, 5, 10, 15, or 30. For all other values of + ``granularity_unit``, must be 1.
+ """ + + unit = proto.Field(proto.STRING, number=1,) + quantity = proto.Field(proto.INT64, number=2,) + + target_column = proto.Field(proto.STRING, number=1,) + time_series_identifier_column = proto.Field(proto.STRING, number=2,) + time_column = proto.Field(proto.STRING, number=3,) + transformations = proto.RepeatedField( + proto.MESSAGE, number=4, message=Transformation, + ) + optimization_objective = proto.Field(proto.STRING, number=5,) + train_budget_milli_node_hours = proto.Field(proto.INT64, number=6,) + weight_column = proto.Field(proto.STRING, number=7,) + time_series_attribute_columns = proto.RepeatedField(proto.STRING, number=19,) + unavailable_at_forecast_columns = proto.RepeatedField(proto.STRING, number=20,) + available_at_forecast_columns = proto.RepeatedField(proto.STRING, number=21,) + data_granularity = proto.Field(proto.MESSAGE, number=22, message=Granularity,) + forecast_horizon = proto.Field(proto.INT64, number=23,) + context_window = proto.Field(proto.INT64, number=24,) + export_evaluated_data_items_config = proto.Field( + proto.MESSAGE, + number=15, + message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, + ) + quantiles = proto.RepeatedField(proto.DOUBLE, number=16,) + validation_options = proto.Field(proto.STRING, number=17,) + + +class AutoMlForecastingMetadata(proto.Message): + r"""Model metadata specific to AutoML Forecasting. + Attributes: + train_cost_milli_node_hours (int): + Output only. The actual training cost of the + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed the train budget. + """ + + train_cost_milli_node_hours = proto.Field(proto.INT64, number=1,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py index b937183e37..43d13ba503 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -91,7 +91,8 @@ class DatasetServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -106,7 +107,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -123,7 +124,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> DatasetServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: DatasetServiceTransport: The transport used by the client instance. @@ -142,7 +143,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the dataset service client. + """Instantiates the dataset service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -369,7 +370,7 @@ async def update_dataset( update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. 
The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - ``display_name`` @@ -891,7 +892,6 @@ async def get_annotation_spec( name (:class:`str`): Required. The name of the AnnotationSpec resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` This corresponds to the ``name`` field @@ -964,7 +964,6 @@ async def list_annotations( parent (:class:`str`): Required. The resource name of the DataItem to list Annotations from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` This corresponds to the ``parent`` field diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py index 201d814c99..26bc6ef650 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -65,7 +65,7 @@ class DatasetServiceClientMeta(type): _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[DatasetServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -88,7 +88,8 @@ class DatasetServiceClient(metaclass=DatasetServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -122,7 +123,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -139,7 +141,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -158,10 +160,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> DatasetServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - DatasetServiceTransport: The transport used by the client instance. + DatasetServiceTransport: The transport used by the client + instance. 
""" return self._transport @@ -169,7 +172,7 @@ def transport(self) -> DatasetServiceTransport: def annotation_path( project: str, location: str, dataset: str, data_item: str, annotation: str, ) -> str: - """Return a fully-qualified annotation string.""" + """Returns a fully-qualified annotation string.""" return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format( project=project, location=location, @@ -180,7 +183,7 @@ def annotation_path( @staticmethod def parse_annotation_path(path: str) -> Dict[str, str]: - """Parse a annotation path into its component segments.""" + """Parses a annotation path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)/annotations/(?P.+?)$", path, @@ -191,7 +194,7 @@ def parse_annotation_path(path: str) -> Dict[str, str]: def annotation_spec_path( project: str, location: str, dataset: str, annotation_spec: str, ) -> str: - """Return a fully-qualified annotation_spec string.""" + """Returns a fully-qualified annotation_spec string.""" return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format( project=project, location=location, @@ -201,7 +204,7 @@ def annotation_spec_path( @staticmethod def parse_annotation_spec_path(path: str) -> Dict[str, str]: - """Parse a annotation_spec path into its component segments.""" + """Parses a annotation_spec path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$", path, @@ -212,14 +215,14 @@ def parse_annotation_spec_path(path: str) -> Dict[str, str]: def data_item_path( project: str, location: str, dataset: str, data_item: str, ) -> str: - """Return a fully-qualified data_item string.""" + """Returns a fully-qualified data_item string.""" return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format( project=project, location=location, dataset=dataset, data_item=data_item, ) @staticmethod def parse_data_item_path(path: str) -> Dict[str, str]: - """Parse a data_item path into its component segments.""" + """Parses a data_item path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)$", path, @@ -228,14 +231,14 @@ def parse_data_item_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path(project: str, location: str, dataset: str,) -> str: - """Return a fully-qualified dataset string.""" + """Returns a fully-qualified dataset string.""" return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: - """Parse a dataset path into its component segments.""" + """Parses a dataset path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path, @@ -244,7 +247,7 @@ def parse_dataset_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -257,7 +260,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + 
"""Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -268,7 +271,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -279,7 +282,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -290,7 +293,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -309,7 +312,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the dataset service client. + """Instantiates the dataset service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -364,9 +367,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -378,12 +382,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -398,8 +404,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -602,7 +608,7 @@ def update_dataset( update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - ``display_name`` @@ -1124,7 +1130,6 @@ def get_annotation_spec( name (str): Required. The name of the AnnotationSpec resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` This corresponds to the ``name`` field @@ -1197,7 +1202,6 @@ def list_annotations( parent (str): Required. The resource name of the DataItem to list Annotations from. 
Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` This corresponds to the ``parent`` field diff --git a/google/cloud/aiplatform_v1/services/dataset_service/pagers.py b/google/cloud/aiplatform_v1/services/dataset_service/pagers.py index be142bd36e..e7229b1097 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/pagers.py @@ -118,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -246,7 +246,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -374,7 +374,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 544a7788df..47e6421c4e 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -81,7 +81,8 @@ class EndpointServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -96,7 +97,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -113,7 +114,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> EndpointServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: EndpointServiceTransport: The transport used by the client instance. @@ -132,7 +133,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the endpoint service client. + """Instantiates the endpoint service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -439,7 +440,7 @@ async def update_endpoint( should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The update mask applies to the resource. See - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py index 8bc3a8026f..6b0ffb9a90 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -61,7 +61,7 @@ class EndpointServiceClientMeta(type): _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[EndpointServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -84,7 +84,8 @@ class EndpointServiceClient(metaclass=EndpointServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -118,7 +119,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -135,7 +137,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -154,23 +156,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> EndpointServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - EndpointServiceTransport: The transport used by the client instance. + EndpointServiceTransport: The transport used by the client + instance. 
""" return self._transport @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: - """Return a fully-qualified endpoint string.""" + """Returns a fully-qualified endpoint string.""" return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) @staticmethod def parse_endpoint_path(path: str) -> Dict[str, str]: - """Parse a endpoint path into its component segments.""" + """Parses a endpoint path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path, @@ -179,14 +182,14 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path, @@ -195,7 +198,7 @@ def parse_model_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -208,7 +211,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -219,7 +222,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -230,7 +233,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -241,7 +244,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -260,7 +263,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the endpoint service client. + """Instantiates the endpoint service client. 
Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -315,9 +318,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -329,12 +333,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -349,8 +355,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -633,7 +639,7 @@ def update_endpoint( should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. See - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py b/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py index 0b222aee01..739497c613 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py @@ -116,7 +116,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index ff31a99af8..c3a2e84cd1 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -60,7 +60,7 @@ class JobServiceAsyncClient: - """A service for creating and managing AI Platform's jobs.""" + """A service for creating and managing Vertex AI's jobs.""" _client: JobServiceClient @@ -110,7 +110,8 @@ class JobServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -125,7 +126,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. 
Args: filename (str): The path to the service account private key json @@ -142,7 +143,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> JobServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: JobServiceTransport: The transport used by the client instance. @@ -161,7 +162,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the job service client. + """Instantiates the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -629,7 +630,7 @@ async def create_data_labeling_job( Args: request (:class:`google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest`): The request object. Request message for - [DataLabelingJobService.CreateDataLabelingJob][]. + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. parent (:class:`str`): Required. The parent of the DataLabelingJob. Format: ``projects/{project}/locations/{location}`` @@ -710,10 +711,9 @@ async def get_data_labeling_job( Args: request (:class:`google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest`): The request object. Request message for - [DataLabelingJobService.GetDataLabelingJob][]. + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. name (:class:`str`): Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` This corresponds to the ``name`` field @@ -783,7 +783,7 @@ async def list_data_labeling_jobs( Args: request (:class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest`): The request object. Request message for - [DataLabelingJobService.ListDataLabelingJobs][]. + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. parent (:class:`str`): Required. The parent of the DataLabelingJob. Format: ``projects/{project}/locations/{location}`` @@ -867,7 +867,6 @@ async def delete_data_labeling_job( name (:class:`str`): Required. The name of the DataLabelingJob to be deleted. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` This corresponds to the ``name`` field @@ -958,10 +957,9 @@ async def cancel_data_labeling_job( Args: request (:class:`google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest`): The request object. Request message for - [DataLabelingJobService.CancelDataLabelingJob][]. + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. name (:class:`str`): Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` This corresponds to the ``name`` field @@ -1111,7 +1109,6 @@ async def get_hyperparameter_tuning_job( name (:class:`str`): Required. The name of the HyperparameterTuningJob resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` This corresponds to the ``name`` field @@ -1267,7 +1264,6 @@ async def delete_hyperparameter_tuning_job( name (:class:`str`): Required. The name of the HyperparameterTuningJob resource to be deleted. 
Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` This corresponds to the ``name`` field @@ -1374,7 +1370,6 @@ async def cancel_hyperparameter_tuning_job( name (:class:`str`): Required. The name of the HyperparameterTuningJob to cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` This corresponds to the ``name`` field @@ -1527,7 +1522,6 @@ async def get_batch_prediction_job( name (:class:`str`): Required. The name of the BatchPredictionJob resource. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` This corresponds to the ``name`` field @@ -1686,7 +1680,6 @@ async def delete_batch_prediction_job( name (:class:`str`): Required. The name of the BatchPredictionJob resource to be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` This corresponds to the ``name`` field @@ -1791,7 +1784,6 @@ async def cancel_batch_prediction_job( name (:class:`str`): Required. The name of the BatchPredictionJob to cancel. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` This corresponds to the ``name`` field diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index d06dece139..687a832212 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -76,7 +76,7 @@ class JobServiceClientMeta(type): _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -95,11 +95,12 @@ def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: class JobServiceClient(metaclass=JobServiceClientMeta): - """A service for creating and managing AI Platform's jobs.""" + """A service for creating and managing Vertex AI's jobs.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -133,7 +134,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -150,7 +152,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -169,10 +171,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> JobServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - JobServiceTransport: The transport used by the client instance. 
+ JobServiceTransport: The transport used by the client + instance. """ return self._transport @@ -180,7 +183,7 @@ def transport(self) -> JobServiceTransport: def batch_prediction_job_path( project: str, location: str, batch_prediction_job: str, ) -> str: - """Return a fully-qualified batch_prediction_job string.""" + """Returns a fully-qualified batch_prediction_job string.""" return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format( project=project, location=location, @@ -189,7 +192,7 @@ def batch_prediction_job_path( @staticmethod def parse_batch_prediction_job_path(path: str) -> Dict[str, str]: - """Parse a batch_prediction_job path into its component segments.""" + """Parses a batch_prediction_job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/batchPredictionJobs/(?P<batch_prediction_job>.+?)$", path, @@ -198,14 +201,14 @@ def parse_batch_prediction_job_path(path: str) -> Dict[str, str]: @staticmethod def custom_job_path(project: str, location: str, custom_job: str,) -> str: - """Return a fully-qualified custom_job string.""" + """Returns a fully-qualified custom_job string.""" return "projects/{project}/locations/{location}/customJobs/{custom_job}".format( project=project, location=location, custom_job=custom_job, ) @staticmethod def parse_custom_job_path(path: str) -> Dict[str, str]: - """Parse a custom_job path into its component segments.""" + """Parses a custom_job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$", path, @@ -216,14 +219,14 @@ def parse_custom_job_path(path: str) -> Dict[str, str]: def data_labeling_job_path( project: str, location: str, data_labeling_job: str, ) -> str: - """Return a fully-qualified data_labeling_job string.""" + """Returns a fully-qualified data_labeling_job string.""" return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format( project=project, location=location, data_labeling_job=data_labeling_job, ) @staticmethod def parse_data_labeling_job_path(path: str) -> Dict[str, str]: - """Parse a data_labeling_job path into its component segments.""" + """Parses a data_labeling_job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/dataLabelingJobs/(?P<data_labeling_job>.+?)$", path, @@ -232,14 +235,14 @@ def parse_data_labeling_job_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path(project: str, location: str, dataset: str,) -> str: - """Return a fully-qualified dataset string.""" + """Returns a fully-qualified dataset string.""" return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: - """Parse a dataset path into its component segments.""" + """Parses a dataset path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path, @@ -250,7 +253,7 @@ def parse_dataset_path(path: str) -> Dict[str, str]: def hyperparameter_tuning_job_path( project: str, location: str, hyperparameter_tuning_job: str, ) -> str: - """Return a fully-qualified hyperparameter_tuning_job string.""" + """Returns a fully-qualified hyperparameter_tuning_job string.""" return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format( project=project, location=location, @@ -259,7 +262,7 @@ def hyperparameter_tuning_job_path( @staticmethod def 
parse_hyperparameter_tuning_job_path(path: str) -> Dict[str, str]: - """Parse a hyperparameter_tuning_job path into its component segments.""" + """Parses a hyperparameter_tuning_job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/hyperparameterTuningJobs/(?P<hyperparameter_tuning_job>.+?)$", path, @@ -268,14 +271,14 @@ def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path, @@ -284,14 +287,14 @@ def parse_model_path(path: str) -> Dict[str, str]: @staticmethod def trial_path(project: str, location: str, study: str, trial: str,) -> str: - """Return a fully-qualified trial string.""" + """Returns a fully-qualified trial string.""" return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format( project=project, location=location, study=study, trial=trial, ) @staticmethod def parse_trial_path(path: str) -> Dict[str, str]: - """Parse a trial path into its component segments.""" + """Parses a trial path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)/trials/(?P<trial>.+?)$", path, @@ -300,7 +303,7 @@ def parse_trial_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -313,7 +316,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -324,7 +327,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -335,7 +338,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -346,7 +349,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -365,7 +368,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the job service client. 
+ """Instantiates the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -420,9 +423,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -434,12 +438,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -454,8 +460,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -899,7 +905,7 @@ def create_data_labeling_job( Args: request (google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest): The request object. Request message for - [DataLabelingJobService.CreateDataLabelingJob][]. + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. parent (str): Required. The parent of the DataLabelingJob. Format: ``projects/{project}/locations/{location}`` @@ -980,10 +986,9 @@ def get_data_labeling_job( Args: request (google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest): The request object. Request message for - [DataLabelingJobService.GetDataLabelingJob][]. + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. name (str): Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` This corresponds to the ``name`` field @@ -1053,7 +1058,7 @@ def list_data_labeling_jobs( Args: request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest): The request object. Request message for - [DataLabelingJobService.ListDataLabelingJobs][]. + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. parent (str): Required. The parent of the DataLabelingJob. Format: ``projects/{project}/locations/{location}`` @@ -1137,7 +1142,6 @@ def delete_data_labeling_job( name (str): Required. The name of the DataLabelingJob to be deleted. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` This corresponds to the ``name`` field @@ -1228,10 +1232,9 @@ def cancel_data_labeling_job( Args: request (google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest): The request object. Request message for - [DataLabelingJobService.CancelDataLabelingJob][]. + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. name (str): Required. The name of the DataLabelingJob. 
Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` This corresponds to the ``name`` field @@ -1383,7 +1386,6 @@ def get_hyperparameter_tuning_job( name (str): Required. The name of the HyperparameterTuningJob resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` This corresponds to the ``name`` field @@ -1543,7 +1545,6 @@ def delete_hyperparameter_tuning_job( name (str): Required. The name of the HyperparameterTuningJob resource to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` This corresponds to the ``name`` field @@ -1652,7 +1653,6 @@ def cancel_hyperparameter_tuning_job( name (str): Required. The name of the HyperparameterTuningJob to cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` This corresponds to the ``name`` field @@ -1809,7 +1809,6 @@ def get_batch_prediction_job( name (str): Required. The name of the BatchPredictionJob resource. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` This corresponds to the ``name`` field @@ -1970,7 +1969,6 @@ def delete_batch_prediction_job( name (str): Required. The name of the BatchPredictionJob resource to be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` This corresponds to the ``name`` field @@ -2077,7 +2075,6 @@ def cancel_batch_prediction_job( name (str): Required. The name of the BatchPredictionJob to cancel. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` This corresponds to the ``name`` field diff --git a/google/cloud/aiplatform_v1/services/job_service/pagers.py b/google/cloud/aiplatform_v1/services/job_service/pagers.py index 53d7a12e57..7bb25cc294 100644 --- a/google/cloud/aiplatform_v1/services/job_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/job_service/pagers.py @@ -119,7 +119,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -247,7 +247,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -377,7 +377,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -509,7 +509,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py index 2a74d1c8d6..d9ce228e91 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py @@ -46,7 +46,7 @@ class JobServiceGrpcTransport(JobServiceTransport): """gRPC backend transport for JobService. - A service for creating and managing AI Platform's jobs. + A service for creating and managing Vertex AI's jobs. 
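Taken together, the renamed helpers and job RPCs in the hunks above compose as follows; a minimal sketch against ``JobServiceClient``, with placeholder project, location, and job IDs:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.JobServiceClient()

    # The *_path helpers build the resource-name format the docstrings describe.
    name = client.hyperparameter_tuning_job_path(
        project="my-project",             # placeholder project ID
        location="us-central1",           # placeholder region
        hyperparameter_tuning_job="555",  # placeholder job ID
    )
    job = client.get_hyperparameter_tuning_job(name=name)

    # The parse_*_path helpers invert the builders.
    segments = aiplatform_v1.JobServiceClient.parse_hyperparameter_tuning_job_path(name)
    assert segments["hyperparameter_tuning_job"] == "555"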
This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py index 95c2e34118..256aa9b830 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py @@ -48,7 +48,7 @@ class JobServiceGrpcAsyncIOTransport(JobServiceTransport): """gRPC AsyncIO backend transport for JobService. - A service for creating and managing AI Platform's jobs. + A service for creating and managing Vertex AI's jobs. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation diff --git a/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1/services/migration_service/async_client.py index 3895ec95f2..878c71d554 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/async_client.py @@ -38,8 +38,7 @@ class MigrationServiceAsyncClient: """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to AI - Platform. + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. """ _client: MigrationServiceClient @@ -90,7 +89,8 @@ class MigrationServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -105,7 +105,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -122,7 +122,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> MigrationServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: MigrationServiceTransport: The transport used by the client instance. @@ -141,7 +141,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the migration service client. + """Instantiates the migration service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -191,7 +191,7 @@ async def search_migratable_resources( ) -> pagers.SearchMigratableResourcesAsyncPager: r"""Searches all of the resources in automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to AI Platform's + ml.googleapis.com that can be migrated to Vertex AI's given location. Args: @@ -200,7 +200,7 @@ async def search_migratable_resources( [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. parent (:class:`str`): Required. The location that the migratable resources - should be searched from. It's the AI Platform location + should be searched from. 
It's the Vertex AI location that the resources can be migrated to, not the resources' original location. Format: ``projects/{project}/locations/{location}`` @@ -280,7 +280,7 @@ async def batch_migrate_resources( ) -> operation_async.AsyncOperation: r"""Batch migrates resources from ml.googleapis.com, automl.googleapis.com, and datalabeling.googleapis.com - to AI Platform (Unified). + to Vertex AI. Args: request (:class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest`): diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index 8f91e19bfe..162818246f 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -55,7 +55,7 @@ class MigrationServiceClientMeta(type): _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[MigrationServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -75,13 +75,13 @@ def get_transport_class(cls, label: str = None,) -> Type[MigrationServiceTranspo class MigrationServiceClient(metaclass=MigrationServiceClientMeta): """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to AI - Platform. + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -115,7 +115,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -132,7 +133,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -151,10 +152,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> MigrationServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - MigrationServiceTransport: The transport used by the client instance. + MigrationServiceTransport: The transport used by the client + instance. 
""" return self._transport @@ -162,14 +164,14 @@ def transport(self) -> MigrationServiceTransport: def annotated_dataset_path( project: str, dataset: str, annotated_dataset: str, ) -> str: - """Return a fully-qualified annotated_dataset string.""" + """Returns a fully-qualified annotated_dataset string.""" return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format( project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) @staticmethod def parse_annotated_dataset_path(path: str) -> Dict[str, str]: - """Parse a annotated_dataset path into its component segments.""" + """Parses a annotated_dataset path into its component segments.""" m = re.match( r"^projects/(?P.+?)/datasets/(?P.+?)/annotatedDatasets/(?P.+?)$", path, @@ -178,14 +180,14 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path(project: str, location: str, dataset: str,) -> str: - """Return a fully-qualified dataset string.""" + """Returns a fully-qualified dataset string.""" return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: - """Parse a dataset path into its component segments.""" + """Parses a dataset path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path, @@ -194,43 +196,43 @@ def parse_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path(project: str, location: str, dataset: str,) -> str: - """Return a fully-qualified dataset string.""" + """Returns a fully-qualified dataset string.""" return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: - """Parse a dataset path into its component segments.""" + """Parses a dataset path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path, ) return m.groupdict() if m else {} - # @staticmethod - # def dataset_path(project: str, dataset: str,) -> str: - # """Return a fully-qualified dataset string.""" - # return "projects/{project}/datasets/{dataset}".format( - # project=project, dataset=dataset, - # ) + @staticmethod + def dataset_path(project: str, dataset: str,) -> str: + """Returns a fully-qualified dataset string.""" + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, + ) - # @staticmethod - # def parse_dataset_path(path: str) -> Dict[str, str]: - # """Parse a dataset path into its component segments.""" - # m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) - # return m.groupdict() if m else {} + @staticmethod + def parse_dataset_path(path: str) -> Dict[str, str]: + """Parses a dataset path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + return m.groupdict() if m else {} @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( 
r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path, @@ -239,14 +241,14 @@ def parse_model_path(path: str) -> Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path, @@ -255,14 +257,14 @@ def parse_model_path(path: str) -> Dict[str, str]: @staticmethod def version_path(project: str, model: str, version: str,) -> str: - """Return a fully-qualified version string.""" + """Returns a fully-qualified version string.""" return "projects/{project}/models/{model}/versions/{version}".format( project=project, model=model, version=version, ) @staticmethod def parse_version_path(path: str) -> Dict[str, str]: - """Parse a version path into its component segments.""" + """Parses a version path into its component segments.""" m = re.match( r"^projects/(?P.+?)/models/(?P.+?)/versions/(?P.+?)$", path, @@ -271,7 +273,7 @@ def parse_version_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -284,7 +286,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -295,7 +297,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -306,7 +308,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -317,7 +319,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -336,7 +338,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the migration service client. + """Instantiates the migration service client. 
Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -391,9 +393,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -405,12 +408,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -425,8 +430,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -452,7 +457,7 @@ def search_migratable_resources( ) -> pagers.SearchMigratableResourcesPager: r"""Searches all of the resources in automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to AI Platform's + ml.googleapis.com that can be migrated to Vertex AI's given location. Args: @@ -461,7 +466,7 @@ def search_migratable_resources( [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. parent (str): Required. The location that the migratable resources - should be searched from. It's the AI Platform location + should be searched from. It's the Vertex AI location that the resources can be migrated to, not the resources' original location. Format: ``projects/{project}/locations/{location}`` @@ -543,7 +548,7 @@ def batch_migrate_resources( ) -> operation.Operation: r"""Batch migrates resources from ml.googleapis.com, automl.googleapis.com, and datalabeling.googleapis.com - to AI Platform (Unified). + to Vertex AI. Args: request (google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest): diff --git a/google/cloud/aiplatform_v1/services/migration_service/pagers.py b/google/cloud/aiplatform_v1/services/migration_service/pagers.py index 331763e599..e62dbdc3e5 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/migration_service/pagers.py @@ -118,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py index e138cb32c6..c237f45c8e 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py @@ -34,8 +34,7 @@ class MigrationServiceGrpcTransport(MigrationServiceTransport): """gRPC backend transport for MigrationService. 
A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to AI - Platform. + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation @@ -251,7 +250,7 @@ def search_migratable_resources( Searches all of the resources in automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to AI Platform's + ml.googleapis.com that can be migrated to Vertex AI's given location. Returns: @@ -282,7 +281,7 @@ def batch_migrate_resources( Batch migrates resources from ml.googleapis.com, automl.googleapis.com, and datalabeling.googleapis.com - to AI Platform (Unified). + to Vertex AI. Returns: Callable[[~.BatchMigrateResourcesRequest], diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py index ba8d3a4841..79355dcb4b 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py @@ -36,8 +36,7 @@ class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport): """gRPC AsyncIO backend transport for MigrationService. A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to AI - Platform. + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation @@ -256,7 +255,7 @@ def search_migratable_resources( Searches all of the resources in automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to AI Platform's + ml.googleapis.com that can be migrated to Vertex AI's given location. Returns: @@ -288,7 +287,7 @@ def batch_migrate_resources( Batch migrates resources from ml.googleapis.com, automl.googleapis.com, and datalabeling.googleapis.com - to AI Platform (Unified). + to Vertex AI. Returns: Callable[[~.BatchMigrateResourcesRequest], diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py index d6d932f62f..2d46ccb613 100644 --- a/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -47,7 +47,7 @@ class ModelServiceAsyncClient: - """A service for managing AI Platform's machine learning Models.""" + """A service for managing Vertex AI's machine learning Models.""" _client: ModelServiceClient @@ -95,7 +95,8 @@ class ModelServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -110,7 +111,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. 
Args: filename (str): The path to the service account private key json @@ -127,7 +128,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> ModelServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: ModelServiceTransport: The transport used by the client instance. @@ -146,7 +147,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the model service client. + """Instantiates the model service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -195,7 +196,7 @@ async def upload_model( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Uploads a Model artifact into AI Platform. + r"""Uploads a Model artifact into Vertex AI. Args: request (:class:`google.cloud.aiplatform_v1.types.UploadModelRequest`): @@ -454,7 +455,7 @@ async def update_model( update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -721,7 +722,6 @@ async def get_model_evaluation( name (:class:`str`): Required. The name of the ModelEvaluation resource. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` This corresponds to the ``name`` field @@ -877,7 +877,6 @@ async def get_model_evaluation_slice( name (:class:`str`): Required. The name of the ModelEvaluationSlice resource. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` This corresponds to the ``name`` field @@ -952,7 +951,6 @@ async def list_model_evaluation_slices( parent (:class:`str`): Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` This corresponds to the ``parent`` field diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py index 6a1f353dbc..0011d2541c 100644 --- a/google/cloud/aiplatform_v1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_service/client.py @@ -63,7 +63,7 @@ class ModelServiceClientMeta(type): _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[ModelServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -82,11 +82,12 @@ def get_transport_class(cls, label: str = None,) -> Type[ModelServiceTransport]: class ModelServiceClient(metaclass=ModelServiceClientMeta): - """A service for managing AI Platform's machine learning Models.""" + """A service for managing Vertex AI's machine learning Models.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
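The conversion that ``_get_default_mtls_endpoint`` documents above is a pure suffix rewrite. A standalone sketch of that rule (an illustration, not the library's exact implementation):

    def to_mtls_endpoint(api_endpoint: str) -> str:
        # "aiplatform.googleapis.com"         -> "aiplatform.mtls.googleapis.com"
        # "aiplatform.sandbox.googleapis.com" -> "aiplatform.mtls.sandbox.googleapis.com"
        sandbox = ".sandbox.googleapis.com"
        plain = ".googleapis.com"
        if api_endpoint.endswith(sandbox):
            return api_endpoint[: -len(sandbox)] + ".mtls" + sandbox
        if api_endpoint.endswith(plain):
            return api_endpoint[: -len(plain)] + ".mtls" + plain
        return api_endpoint  # anything else passes through unchanged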
Args: @@ -120,7 +121,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -137,7 +139,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -156,23 +158,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> ModelServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - ModelServiceTransport: The transport used by the client instance. + ModelServiceTransport: The transport used by the client + instance. """ return self._transport @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: - """Return a fully-qualified endpoint string.""" + """Returns a fully-qualified endpoint string.""" return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) @staticmethod def parse_endpoint_path(path: str) -> Dict[str, str]: - """Parse a endpoint path into its component segments.""" + """Parses a endpoint path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path, @@ -181,14 +184,14 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path, @@ -199,14 +202,14 @@ def parse_model_path(path: str) -> Dict[str, str]: @staticmethod def model_evaluation_path( project: str, location: str, model: str, evaluation: str, ) -> str: - """Return a fully-qualified model_evaluation string.""" + """Returns a fully-qualified model_evaluation string.""" return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format( project=project, location=location, model=model, evaluation=evaluation, ) @staticmethod def parse_model_evaluation_path(path: str) -> Dict[str, str]: - """Parse a model_evaluation path into its component segments.""" + """Parses a model_evaluation path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/evaluations/(?P<evaluation>.+?)$", path, @@ -217,7 +220,7 @@ def parse_model_evaluation_path(path: str) -> Dict[str, str]: @staticmethod def model_evaluation_slice_path( project: str, location: str, model: str, evaluation: str, slice: str, ) -> str: - """Return a fully-qualified model_evaluation_slice string.""" + """Returns a fully-qualified model_evaluation_slice string.""" return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format( project=project, 
location=location, @@ -228,7 +231,7 @@ def model_evaluation_slice_path( @staticmethod def parse_model_evaluation_slice_path(path: str) -> Dict[str, str]: - """Parse a model_evaluation_slice path into its component segments.""" + """Parses a model_evaluation_slice path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/evaluations/(?P<evaluation>.+?)/slices/(?P<slice>.+?)$", path, @@ -239,14 +242,14 @@ def parse_model_evaluation_slice_path(path: str) -> Dict[str, str]: @staticmethod def training_pipeline_path( project: str, location: str, training_pipeline: str, ) -> str: - """Return a fully-qualified training_pipeline string.""" + """Returns a fully-qualified training_pipeline string.""" return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format( project=project, location=location, training_pipeline=training_pipeline, ) @staticmethod def parse_training_pipeline_path(path: str) -> Dict[str, str]: - """Parse a training_pipeline path into its component segments.""" + """Parses a training_pipeline path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/trainingPipelines/(?P<training_pipeline>.+?)$", path, @@ -255,7 +258,7 @@ def parse_training_pipeline_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -268,7 +271,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -279,7 +282,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -290,7 +293,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -301,7 +304,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -320,7 +323,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the model service client. + """Instantiates the model service client. 
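The constructor hunks that follow replace ternaries with explicit if/else branches but keep the endpoint-selection rules unchanged. Callers who pass an explicit ``client_options.api_endpoint`` skip that branching entirely, which is the usual pattern for Vertex AI's regional endpoints; a sketch with a placeholder region:

    from google.api_core.client_options import ClientOptions
    from google.cloud import aiplatform_v1

    # An explicit api_endpoint bypasses the GOOGLE_API_USE_MTLS_ENDPOINT
    # "never" / "always" / "auto" logic shown in the hunk below.
    client = aiplatform_v1.ModelServiceClient(
        client_options=ClientOptions(
            api_endpoint="us-central1-aiplatform.googleapis.com",  # placeholder region
        ),
    )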
Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -375,9 +378,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -389,12 +393,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -409,8 +415,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -435,7 +441,7 @@ def upload_model( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gac_operation.Operation: - r"""Uploads a Model artifact into AI Platform. + r"""Uploads a Model artifact into Vertex AI. Args: request (google.cloud.aiplatform_v1.types.UploadModelRequest): @@ -694,7 +700,7 @@ def update_model( update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -961,7 +967,6 @@ def get_model_evaluation( name (str): Required. The name of the ModelEvaluation resource. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` This corresponds to the ``name`` field @@ -1117,7 +1122,6 @@ def get_model_evaluation_slice( name (str): Required. The name of the ModelEvaluationSlice resource. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` This corresponds to the ``name`` field @@ -1194,7 +1198,6 @@ def list_model_evaluation_slices( parent (str): Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` This corresponds to the ``parent`` field diff --git a/google/cloud/aiplatform_v1/services/model_service/pagers.py b/google/cloud/aiplatform_v1/services/model_service/pagers.py index b178c162d2..5cb1620854 100644 --- a/google/cloud/aiplatform_v1/services/model_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/model_service/pagers.py @@ -118,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -246,7 +246,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. 
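Several methods touched above, ``upload_model`` in particular, return long-running operations rather than finished resources. A hedged sketch of the calling pattern, with placeholder names throughout:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.ModelServiceClient()
    operation = client.upload_model(
        parent="projects/my-project/locations/us-central1",  # placeholder
        model=aiplatform_v1.Model(
            display_name="my-model",  # placeholder display name
            container_spec=aiplatform_v1.ModelContainerSpec(
                # placeholder serving image URI
                image_uri="us-docker.pkg.dev/my-project/serving/image:latest",
            ),
        ),
    )
    response = operation.result()  # blocks until the upload completes
    print(response.model)          # fully-qualified name of the new Model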
Args: method (Callable): The method that was originally called, and @@ -376,7 +376,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py index 91479bd3ae..04612d0721 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py @@ -37,7 +37,7 @@ class ModelServiceGrpcTransport(ModelServiceTransport): """gRPC backend transport for ModelService. - A service for managing AI Platform's machine learning Models. + A service for managing Vertex AI's machine learning Models. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation @@ -248,7 +248,7 @@ def upload_model( ) -> Callable[[model_service.UploadModelRequest], operations_pb2.Operation]: r"""Return a callable for the upload model method over gRPC. - Uploads a Model artifact into AI Platform. + Uploads a Model artifact into Vertex AI. Returns: Callable[[~.UploadModelRequest], diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py index 0ce380ad4a..877db5fe51 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py @@ -39,7 +39,7 @@ class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): """gRPC AsyncIO backend transport for ModelService. - A service for managing AI Platform's machine learning Models. + A service for managing Vertex AI's machine learning Models. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation @@ -255,7 +255,7 @@ def upload_model( ]: r"""Return a callable for the upload model method over gRPC. - Uploads a Model artifact into AI Platform. + Uploads a Model artifact into Vertex AI. Returns: Callable[[~.UploadModelRequest], diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index 6d3e8ed6ec..5da3ad8022 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -46,7 +46,11 @@ class PipelineServiceAsyncClient: - """A service for creating and managing AI Platform's pipelines.""" + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). + """ _client: PipelineServiceClient @@ -88,7 +92,8 @@ class PipelineServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. 
@@ -103,7 +108,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -120,7 +125,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> PipelineServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: PipelineServiceTransport: The transport used by the client instance. @@ -139,7 +144,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the pipeline service client. + """Instantiates the pipeline service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -220,10 +225,10 @@ async def create_training_pipeline( google.cloud.aiplatform_v1.types.TrainingPipeline: The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may - also export data from AI Platform's Dataset which + also export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to AI Platform, and evaluate the Model. + the Model to Vertex AI, and evaluate the Model. """ # Create or coerce a protobuf request object. @@ -283,7 +288,6 @@ async def get_training_pipeline( name (:class:`str`): Required. The name of the TrainingPipeline resource. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` This corresponds to the ``name`` field @@ -299,10 +303,10 @@ async def get_training_pipeline( google.cloud.aiplatform_v1.types.TrainingPipeline: The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may - also export data from AI Platform's Dataset which + also export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to AI Platform, and evaluate the Model. + the Model to Vertex AI, and evaluate the Model. """ # Create or coerce a protobuf request object. @@ -441,7 +445,6 @@ async def delete_training_pipeline( name (:class:`str`): Required. The name of the TrainingPipeline resource to be deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` This corresponds to the ``name`` field @@ -547,7 +550,6 @@ async def cancel_training_pipeline( name (:class:`str`): Required. The name of the TrainingPipeline to cancel. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` This corresponds to the ``name`` field diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py index 73c1f37a1a..9ea5595638 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -64,7 +64,7 @@ class PipelineServiceClientMeta(type): _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[PipelineServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. 
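The reworded ``TrainingPipeline`` docstrings above cover the whole lifecycle: create, get, delete, cancel. A hedged sketch of that flow on the synchronous client, with placeholder names and the task-specific fields elided:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.PipelineServiceClient()
    parent = "projects/my-project/locations/us-central1"  # placeholder

    # A real pipeline also needs training_task_definition and
    # training_task_inputs for the chosen training task; both are elided here.
    pipeline = aiplatform_v1.TrainingPipeline(display_name="my-training-pipeline")

    created = client.create_training_pipeline(
        parent=parent, training_pipeline=pipeline,
    )
    print(created.name)  # .../trainingPipelines/{training_pipeline}
    client.cancel_training_pipeline(name=created.name)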
Args: label: The name of the desired transport. If none is @@ -83,11 +83,16 @@ def get_transport_class(cls, label: str = None,) -> Type[PipelineServiceTranspor class PipelineServiceClient(metaclass=PipelineServiceClientMeta): - """A service for creating and managing AI Platform's pipelines.""" + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). + """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -121,7 +126,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -138,7 +144,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -157,23 +163,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> PipelineServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - PipelineServiceTransport: The transport used by the client instance. + PipelineServiceTransport: The transport used by the client + instance. 
""" return self._transport @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: - """Return a fully-qualified endpoint string.""" + """Returns a fully-qualified endpoint string.""" return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) @staticmethod def parse_endpoint_path(path: str) -> Dict[str, str]: - """Parse a endpoint path into its component segments.""" + """Parses a endpoint path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path, @@ -182,14 +189,14 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path, @@ -200,14 +207,14 @@ def parse_model_path(path: str) -> Dict[str, str]: def training_pipeline_path( project: str, location: str, training_pipeline: str, ) -> str: - """Return a fully-qualified training_pipeline string.""" + """Returns a fully-qualified training_pipeline string.""" return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format( project=project, location=location, training_pipeline=training_pipeline, ) @staticmethod def parse_training_pipeline_path(path: str) -> Dict[str, str]: - """Parse a training_pipeline path into its component segments.""" + """Parses a training_pipeline path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path, @@ -216,7 +223,7 @@ def parse_training_pipeline_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -229,7 +236,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -240,7 +247,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -251,7 +258,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -262,7 +269,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a 
fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -281,7 +288,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the pipeline service client. + """Instantiates the pipeline service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -336,9 +343,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -350,12 +358,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -370,8 +380,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -428,10 +438,10 @@ def create_training_pipeline( google.cloud.aiplatform_v1.types.TrainingPipeline: The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may - also export data from AI Platform's Dataset which + also export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to AI Platform, and evaluate the Model. + the Model to Vertex AI, and evaluate the Model. """ # Create or coerce a protobuf request object. @@ -491,7 +501,6 @@ def get_training_pipeline( name (str): Required. The name of the TrainingPipeline resource. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` This corresponds to the ``name`` field @@ -507,10 +516,10 @@ def get_training_pipeline( google.cloud.aiplatform_v1.types.TrainingPipeline: The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may - also export data from AI Platform's Dataset which + also export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to AI Platform, and evaluate the Model. + the Model to Vertex AI, and evaluate the Model. """ # Create or coerce a protobuf request object. @@ -649,7 +658,6 @@ def delete_training_pipeline( name (str): Required. The name of the TrainingPipeline resource to be deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` This corresponds to the ``name`` field @@ -755,7 +763,6 @@ def cancel_training_pipeline( name (str): Required. The name of the TrainingPipeline to cancel. 
Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` This corresponds to the ``name`` field diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py index cb70e4585b..280d676b4b 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py @@ -118,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py index 9c86da69d3..78a4b0aeb5 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -36,7 +36,10 @@ class PipelineServiceGrpcTransport(PipelineServiceTransport): """gRPC backend transport for PipelineService. - A service for creating and managing AI Platform's pipelines. + A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py index 98ee8ec8c2..9ba0858d40 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -38,7 +38,10 @@ class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): """gRPC AsyncIO backend transport for PipelineService. - A service for creating and managing AI Platform's pipelines. + A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index bec5e54ea1..fe69ba8c58 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -70,7 +70,8 @@ class PredictionServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -85,7 +86,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. 
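The ``from_service_account_info``/``from_service_account_file`` constructors touched here behave identically on the sync and async clients; a minimal sketch (the key-file path is a placeholder):

from google.cloud import aiplatform_v1

# Both constructors accept the same keyword arguments as the regular __init__.
client = aiplatform_v1.PredictionServiceClient.from_service_account_file("key.json")
async_client = aiplatform_v1.PredictionServiceAsyncClient.from_service_account_file("key.json")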
Args: filename (str): The path to the service account private key json @@ -102,7 +103,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> PredictionServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: PredictionServiceTransport: The transport used by the client instance. @@ -121,7 +122,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the prediction service client. + """Instantiates the prediction service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py index 2bbfba4c5c..6caef31722 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -54,7 +54,7 @@ class PredictionServiceClientMeta(type): def get_transport_class( cls, label: str = None, ) -> Type[PredictionServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -77,7 +77,8 @@ class PredictionServiceClient(metaclass=PredictionServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -111,7 +112,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -128,7 +130,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -147,23 +149,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> PredictionServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - PredictionServiceTransport: The transport used by the client instance. + PredictionServiceTransport: The transport used by the client + instance. 
""" return self._transport @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: - """Return a fully-qualified endpoint string.""" + """Returns a fully-qualified endpoint string.""" return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) @staticmethod def parse_endpoint_path(path: str) -> Dict[str, str]: - """Parse a endpoint path into its component segments.""" + """Parses a endpoint path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path, @@ -172,7 +175,7 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -185,7 +188,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -196,7 +199,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -207,7 +210,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -218,7 +221,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -237,7 +240,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the prediction service client. + """Instantiates the prediction service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -292,9 +295,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -306,12 +310,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -326,8 +332,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py index 704ee2ab21..6120e7983f 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py @@ -89,7 +89,8 @@ class SpecialistPoolServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -104,7 +105,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -121,7 +122,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> SpecialistPoolServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: SpecialistPoolServiceTransport: The transport used by the client instance. @@ -141,7 +142,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the specialist pool service client. + """Instantiates the specialist pool service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -296,7 +297,6 @@ async def get_specialist_pool( name (:class:`str`): Required. The name of the SpecialistPool resource. The form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. This corresponds to the ``name`` field diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py index 310211d4dd..96ce2a35cb 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py @@ -61,7 +61,7 @@ class SpecialistPoolServiceClientMeta(type): def get_transport_class( cls, label: str = None, ) -> Type[SpecialistPoolServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -90,7 +90,8 @@ class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
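The endpoint-selection branches rewritten in these hunks reduce to a small decision table; a standalone sketch of the same logic, assuming only google-auth (the two endpoint constants stand in for the client's DEFAULT_ENDPOINT and DEFAULT_MTLS_ENDPOINT class attributes):

import os

from google.auth.transport import mtls

DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = "aiplatform.mtls.googleapis.com"

def resolve_api_endpoint() -> str:
    # Mirrors the GOOGLE_API_USE_MTLS_ENDPOINT handling shown above.
    use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    if use_mtls_env == "never":
        return DEFAULT_ENDPOINT
    if use_mtls_env == "always":
        return DEFAULT_MTLS_ENDPOINT
    if use_mtls_env == "auto":
        # "auto" picks mTLS only when a default client certificate exists.
        if mtls.has_default_client_cert_source():
            return DEFAULT_MTLS_ENDPOINT
        return DEFAULT_ENDPOINT
    raise ValueError(
        "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
    )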
Args: @@ -124,7 +125,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -141,7 +143,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -160,23 +162,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> SpecialistPoolServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - SpecialistPoolServiceTransport: The transport used by the client instance. + SpecialistPoolServiceTransport: The transport used by the client + instance. """ return self._transport @staticmethod def specialist_pool_path(project: str, location: str, specialist_pool: str,) -> str: - """Return a fully-qualified specialist_pool string.""" + """Returns a fully-qualified specialist_pool string.""" return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format( project=project, location=location, specialist_pool=specialist_pool, ) @staticmethod def parse_specialist_pool_path(path: str) -> Dict[str, str]: - """Parse a specialist_pool path into its component segments.""" + """Parses a specialist_pool path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/specialistPools/(?P<specialist_pool>.+?)$", path, @@ -185,7 +188,7 @@ def parse_specialist_pool_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -198,7 +201,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -209,7 +212,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -220,7 +223,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -231,7 +234,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -250,7 +253,7 @@ def __init__( client_options:
Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the specialist pool service client. + """Instantiates the specialist pool service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -305,9 +308,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -319,12 +323,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -339,8 +345,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -471,7 +477,6 @@ def get_specialist_pool( name (str): Required. The name of the SpecialistPool resource. The form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. This corresponds to the ``name`` field diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py index afc17c2fde..a6139b433a 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py @@ -118,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1/types/annotation.py b/google/cloud/aiplatform_v1/types/annotation.py index 0671829241..c44c1dba0e 100644 --- a/google/cloud/aiplatform_v1/types/annotation.py +++ b/google/cloud/aiplatform_v1/types/annotation.py @@ -37,8 +37,8 @@ class Annotation(proto.Message): describing [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is defined as an `OpenAPI 3.0.2 Schema - Object `__. The schema files - that can be used here are found in + Object `__. + The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index 757ee3164e..32194179ab 100644 --- a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -88,8 +88,7 @@ class BatchPredictionJob(proto.Message): Immutable. Parameters configuring the batch behavior. 
Currently only applicable when [dedicated_resources][google.cloud.aiplatform.v1.BatchPredictionJob.dedicated_resources] - are used (in other cases AI Platform does the tuning - itself). + are used (in other cases Vertex AI does the tuning itself). output_info (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputInfo): Output only. Information further describing the output of this job. @@ -216,9 +215,9 @@ class OutputConfig(proto.Message): which as value has ```google.rpc.Status`` `__ containing only ``code`` and ``message`` fields. bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): - The BigQuery project location where the output is to be - written to. In the given project a new dataset is created - with name + The BigQuery project or dataset location where the output is + to be written to. If project is provided, a new dataset is + created with name ``prediction_<model-display-name>_<job-create-time>`` where <model-display-name> is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in @@ -238,10 +237,9 @@ class OutputConfig(proto.Message): ```google.rpc.Status`` `__ represented as a STRUCT, and containing only ``code`` and ``message``. predictions_format (str): - Required. The format in which AI Platform gives the + Required. The format in which Vertex AI gives the predictions, must be one of the [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. """ diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index 55fe308c87..197ddcb78f 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -109,15 +109,18 @@ class CustomJobSpec(proto.Message): Attributes: worker_pool_specs (Sequence[google.cloud.aiplatform_v1.types.WorkerPoolSpec]): Required. The spec of the worker pools - including machine type and Docker image. + including machine type and Docker image. All + worker pools except the first one are optional + and can be skipped by providing an empty value. scheduling (google.cloud.aiplatform_v1.types.Scheduling): Scheduling options for a CustomJob. service_account (str): - Specifies the service account for workload - run-as account. Users submitting jobs must have - act-as permission on this run-as account. If - unspecified, the AI Platform Custom Code Service - Agent for the CustomJob's project is used. + Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this + run-as account. If unspecified, the `AI Platform Custom Code + Service + Agent `__ + for the CustomJob's project is used. network (str): The full name of the Compute Engine `network `__ @@ -140,9 +143,8 @@ class CustomJobSpec(proto.Message): name [id][google.cloud.aiplatform.v1.Trial.id] under its parent HyperparameterTuningJob's baseOutputDirectory. - The following AI Platform environment variables will be - passed to containers or python modules when this field is - set: + The following Vertex AI environment variables will be passed + to containers or python modules when this field is set: For CustomJob: @@ -234,12 +236,13 @@ class PythonPackageSpec(proto.Message): r"""The spec of a Python packaged code. Attributes: executor_image_uri (str): - Required. The URI of a container image in the - Container Registry that will run the provided - python package.
AI Platform provides wide range - of executor images with pre-installed packages - to meet users' various use cases. Only one of - the provided images can be set here. + Required. The URI of a container image in Artifact Registry + that will run the provided Python package. Vertex AI + provides a wide range of executor images with pre-installed + packages to meet users' various use cases. See the list of + `pre-built containers for + training `__. + You must use an image from this list. package_uris (Sequence[str]): Required. The Google Cloud Storage location of the Python package files which are the diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py index d1d8f8f363..efa160d21b 100644 --- a/google/cloud/aiplatform_v1/types/dataset.py +++ b/google/cloud/aiplatform_v1/types/dataset.py @@ -126,7 +126,7 @@ class ImportDataConfig(proto.Message): Storage describing the import format. Validation will be done against the schema. The schema is defined as an `OpenAPI 3.0.2 Schema - Object `__. + Object `__. """ gcs_source = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/dataset_service.py b/google/cloud/aiplatform_v1/types/dataset_service.py index 3305dc7268..f54b5ec376 100644 --- a/google/cloud/aiplatform_v1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1/types/dataset_service.py @@ -104,7 +104,7 @@ class UpdateDatasetRequest(proto.Message): update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - ``display_name`` @@ -353,7 +353,6 @@ class GetAnnotationSpecRequest(proto.Message): Attributes: name (str): Required. The name of the AnnotationSpec resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. @@ -371,7 +370,6 @@ class ListAnnotationsRequest(proto.Message): parent (str): Required. The resource name of the DataItem to list Annotations from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` filter (str): The standard list filter. diff --git a/google/cloud/aiplatform_v1/types/endpoint_service.py b/google/cloud/aiplatform_v1/types/endpoint_service.py index 688336ab71..a2472d9d00 100644 --- a/google/cloud/aiplatform_v1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/endpoint_service.py @@ -180,7 +180,7 @@ class UpdateEndpointRequest(proto.Message): resource on the server. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. See - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. """ endpoint = proto.Field(proto.MESSAGE, number=1, message=gca_endpoint.Endpoint,) diff --git a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py index d5485873a6..ea6f25482d 100644 --- a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py +++ b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py @@ -53,7 +53,7 @@ class HyperparameterTuningJob(proto.Message): max_failed_trial_count (int): The number of failed Trials that need to be seen before failing the HyperparameterTuningJob. 
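The CustomJobSpec documented in the custom_job.py hunks above composes as nested messages; a minimal one-worker-pool sketch (bucket, package, and module names are placeholders; the executor image is one of the pre-built training containers):

from google.cloud import aiplatform_v1

job_spec = aiplatform_v1.CustomJobSpec(
    worker_pool_specs=[
        aiplatform_v1.WorkerPoolSpec(
            machine_spec=aiplatform_v1.MachineSpec(machine_type="n1-standard-4"),
            replica_count=1,
            python_package_spec=aiplatform_v1.PythonPackageSpec(
                executor_image_uri="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-4:latest",
                package_uris=["gs://my-bucket/trainer-0.1.tar.gz"],
                python_module="trainer.task",
            ),
        ),
    ],
    # The AIP_* environment variables described above are derived from this prefix.
    base_output_directory=aiplatform_v1.GcsDestination(
        output_uri_prefix="gs://my-bucket/output",
    ),
)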
- If set to 0, AI Platform decides how many Trials + If set to 0, Vertex AI decides how many Trials must fail before the whole job fails. trial_job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec): Required. The spec of a trial job. The same diff --git a/google/cloud/aiplatform_v1/types/io.py b/google/cloud/aiplatform_v1/types/io.py index 243ec1a745..d23f3d91db 100644 --- a/google/cloud/aiplatform_v1/types/io.py +++ b/google/cloud/aiplatform_v1/types/io.py @@ -77,8 +77,8 @@ class BigQueryDestination(proto.Message): Required. BigQuery URI to a project or table, up to 2000 characters long. - When only the project is specified, the Dataset and Table - are created. When the full table reference is specified, the + When only the project is specified, the Dataset and Table is + created. When the full table reference is specified, the Dataset must exist and table must not exist. Accepted forms: diff --git a/google/cloud/aiplatform_v1/types/job_service.py b/google/cloud/aiplatform_v1/types/job_service.py index 331b91d6e4..65e51923f0 100644 --- a/google/cloud/aiplatform_v1/types/job_service.py +++ b/google/cloud/aiplatform_v1/types/job_service.py @@ -185,7 +185,7 @@ class CancelCustomJobRequest(proto.Message): class CreateDataLabelingJobRequest(proto.Message): r"""Request message for - [DataLabelingJobService.CreateDataLabelingJob][]. + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. Attributes: parent (str): @@ -202,11 +202,12 @@ class CreateDataLabelingJobRequest(proto.Message): class GetDataLabelingJobRequest(proto.Message): - r"""Request message for [DataLabelingJobService.GetDataLabelingJob][]. + r"""Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. + Attributes: name (str): Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` """ @@ -214,7 +215,9 @@ class GetDataLabelingJobRequest(proto.Message): class ListDataLabelingJobsRequest(proto.Message): - r"""Request message for [DataLabelingJobService.ListDataLabelingJobs][]. + r"""Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + Attributes: parent (str): Required. The parent of the DataLabelingJob. Format: @@ -291,7 +294,6 @@ class DeleteDataLabelingJobRequest(proto.Message): name (str): Required. The name of the DataLabelingJob to be deleted. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` """ @@ -300,12 +302,11 @@ class CancelDataLabelingJobRequest(proto.Message): r"""Request message for - [DataLabelingJobService.CancelDataLabelingJob][]. + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. Attributes: name (str): Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` """ @@ -342,7 +343,6 @@ class GetHyperparameterTuningJobRequest(proto.Message): name (str): Required. The name of the HyperparameterTuningJob resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` """ @@ -430,7 +430,6 @@ class DeleteHyperparameterTuningJobRequest(proto.Message): name (str): Required. The name of the HyperparameterTuningJob resource to be deleted.
Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` """ @@ -445,7 +444,6 @@ class CancelHyperparameterTuningJobRequest(proto.Message): name (str): Required. The name of the HyperparameterTuningJob to cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` """ @@ -479,7 +477,6 @@ class GetBatchPredictionJobRequest(proto.Message): name (str): Required. The name of the BatchPredictionJob resource. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` """ @@ -504,6 +501,8 @@ class ListBatchPredictionJobsRequest(proto.Message): - ``state`` supports = and !=. + - ``model_display_name`` supports = and != + Some examples of using the filter are: - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` @@ -564,7 +563,6 @@ class DeleteBatchPredictionJobRequest(proto.Message): name (str): Required. The name of the BatchPredictionJob resource to be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` """ @@ -579,7 +577,6 @@ class CancelBatchPredictionJobRequest(proto.Message): name (str): Required. The name of the BatchPredictionJob to cancel. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` """ diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py index 8f76f04416..fb926e987d 100644 --- a/google/cloud/aiplatform_v1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1/types/machine_resources.py @@ -35,11 +35,13 @@ class MachineSpec(proto.Message): r"""Specification of a single machine. Attributes: machine_type (str): - Immutable. The type of the machine. For the machine types - supported for prediction, see - https://tinyurl.com/aip-docs/predictions/machine-types. For - machine types supported for creating a custom training job, - see https://tinyurl.com/aip-docs/training/configure-compute. + Immutable. The type of the machine. + + See the `list of machine types supported for + prediction `__ + + See the `list of machine types supported for custom + training `__. For [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] @@ -105,7 +107,7 @@ class DedicatedResources(proto.Message): class AutomaticResources(proto.Message): r"""A description of resources that to large degree are decided - by AI Platform, and require only a modest additional + by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. @@ -130,7 +132,7 @@ class AutomaticResources(proto.Message): its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under - heavy traffic will be assume, though AI Platform + heavy traffic will be assumed, though Vertex AI may be unable to scale beyond certain replica number. """ @@ -150,7 +152,7 @@ class BatchDedicatedResources(proto.Message): single machine. starting_replica_count (int): Immutable. The number of machine replicas used at the start - of the batch operation. If not set, AI Platform decides
If not set, Vertex AI decides starting number, not greater than [max_replica_count][google.cloud.aiplatform.v1.BatchDedicatedResources.max_replica_count] max_replica_count (int): diff --git a/google/cloud/aiplatform_v1/types/migratable_resource.py b/google/cloud/aiplatform_v1/types/migratable_resource.py index 2ea22001c6..9e0d115413 100644 --- a/google/cloud/aiplatform_v1/types/migratable_resource.py +++ b/google/cloud/aiplatform_v1/types/migratable_resource.py @@ -120,7 +120,6 @@ class DataLabelingAnnotatedDataset(proto.Message): annotated_dataset (str): Full resource name of data labeling AnnotatedDataset. Format: - ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. annotated_dataset_display_name (str): The AnnotatedDataset's display name in diff --git a/google/cloud/aiplatform_v1/types/migration_service.py b/google/cloud/aiplatform_v1/types/migration_service.py index 6a9d231aaf..3102473b6d 100644 --- a/google/cloud/aiplatform_v1/types/migration_service.py +++ b/google/cloud/aiplatform_v1/types/migration_service.py @@ -43,7 +43,7 @@ class SearchMigratableResourcesRequest(proto.Message): Attributes: parent (str): Required. The location that the migratable resources should - be searched from. It's the AI Platform location that the + be searched from. It's the Vertex AI location that the resources can be migrated to, not the resources' original location. Format: ``projects/{project}/locations/{location}`` @@ -53,21 +53,25 @@ class SearchMigratableResourcesRequest(proto.Message): page_token (str): The standard page token. filter (str): - Supported filters are: + A filter for your search. You can use the following types of + filters: - - Resource type: For a specific type of MigratableResource. + - Resource type filters. The following strings filter for a + specific type of + [MigratableResource][google.cloud.aiplatform.v1.MigratableResource]: - ``ml_engine_model_version:*`` - - ``automl_model:*``, + - ``automl_model:*`` - ``automl_dataset:*`` - - ``data_labeling_dataset:*``. + - ``data_labeling_dataset:*`` - - Migrated or not: Filter migrated resource or not by - last_migrate_time. + - "Migrated or not" filters. The following strings filter + for resources that either have or have not already been + migrated: - - ``last_migrate_time:*`` will filter migrated + - ``last_migrate_time:*`` filters for migrated resources. - - ``NOT last_migrate_time:*`` will filter not yet + - ``NOT last_migrate_time:*`` filters for not yet migrated resources. """ @@ -124,28 +128,27 @@ class BatchMigrateResourcesRequest(proto.Message): class MigrateResourceRequest(proto.Message): r"""Config of migrating one resource from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to AI - Platform. + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. Attributes: migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig): Config for migrating Version in - ml.googleapis.com to AI Platform's Model. + ml.googleapis.com to Vertex AI's Model. migrate_automl_model_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlModelConfig): Config for migrating Model in - automl.googleapis.com to AI Platform's Model. + automl.googleapis.com to Vertex AI's Model. migrate_automl_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig): Config for migrating Dataset in - automl.googleapis.com to AI Platform's Dataset. 
+ automl.googleapis.com to Vertex AI's Dataset. migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig): Config for migrating Dataset in - datalabeling.googleapis.com to AI Platform's + datalabeling.googleapis.com to Vertex AI's Dataset. """ class MigrateMlEngineModelVersionConfig(proto.Message): - r"""Config for migrating version in ml.googleapis.com to AI - Platform's Model. + r"""Config for migrating version in ml.googleapis.com to Vertex + AI's Model. Attributes: endpoint (str): @@ -164,8 +167,8 @@ class MigrateMlEngineModelVersionConfig(proto.Message): Format: ``projects/{project}/models/{model}/versions/{version}``. model_display_name (str): - Required. Display name of the model in AI - Platform. System will pick a display name if + Required. Display name of the model in Vertex + AI. System will pick a display name if unspecified. """ @@ -174,16 +177,16 @@ class MigrateMlEngineModelVersionConfig(proto.Message): model_display_name = proto.Field(proto.STRING, number=3,) class MigrateAutomlModelConfig(proto.Message): - r"""Config for migrating Model in automl.googleapis.com to AI - Platform's Model. + r"""Config for migrating Model in automl.googleapis.com to Vertex + AI's Model. Attributes: model (str): Required. Full resource name of automl Model. Format: ``projects/{project}/locations/{location}/models/{model}``. model_display_name (str): - Optional. Display name of the model in AI - Platform. System will pick a display name if + Optional. Display name of the model in Vertex + AI. System will pick a display name if unspecified. """ @@ -191,16 +194,16 @@ class MigrateAutomlModelConfig(proto.Message): model_display_name = proto.Field(proto.STRING, number=2,) class MigrateAutomlDatasetConfig(proto.Message): - r"""Config for migrating Dataset in automl.googleapis.com to AI - Platform's Dataset. + r"""Config for migrating Dataset in automl.googleapis.com to + Vertex AI's Dataset. Attributes: dataset (str): Required. Full resource name of automl Dataset. Format: ``projects/{project}/locations/{location}/datasets/{dataset}``. dataset_display_name (str): - Required. Display name of the Dataset in AI - Platform. System will pick a display name if + Required. Display name of the Dataset in + Vertex AI. System will pick a display name if unspecified. """ @@ -216,26 +219,25 @@ class MigrateDataLabelingDatasetConfig(proto.Message): Required. Full resource name of data labeling Dataset. Format: ``projects/{project}/datasets/{dataset}``. dataset_display_name (str): - Optional. Display name of the Dataset in AI - Platform. System will pick a display name if + Optional. Display name of the Dataset in + Vertex AI. System will pick a display name if unspecified. migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]): Optional. Configs for migrating AnnotatedDataset in datalabeling.googleapis.com - to AI Platform's SavedQuery. The specified + to Vertex AI's SavedQuery. The specified AnnotatedDatasets have to belong to the datalabeling Dataset. """ class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message): r"""Config for migrating AnnotatedDataset in - datalabeling.googleapis.com to AI Platform's SavedQuery. + datalabeling.googleapis.com to Vertex AI's SavedQuery. Attributes: annotated_dataset (str): Required. Full resource name of data labeling AnnotatedDataset. 
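As a usage sketch for the migration configs described above (all resource names are placeholders), each per-resource config is wrapped in a MigrateResourceRequest and submitted as a batch, which returns a long-running operation:

from google.cloud import aiplatform_v1

client = aiplatform_v1.MigrationServiceClient()
request = aiplatform_v1.BatchMigrateResourcesRequest(
    parent="projects/my-project/locations/us-central1",
    migrate_resource_requests=[
        aiplatform_v1.MigrateResourceRequest(
            migrate_automl_model_config=aiplatform_v1.MigrateResourceRequest.MigrateAutomlModelConfig(
                model="projects/my-project/locations/us-central1/models/123",
                model_display_name="migrated-model",
            ),
        ),
    ],
)
operation = client.batch_migrate_resources(request=request)
response = operation.result()  # blocks until the migration finishes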
Format: - ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. """ diff --git a/google/cloud/aiplatform_v1/types/model.py b/google/cloud/aiplatform_v1/types/model.py index 6a6f4043b7..cfd190f2c5 100644 --- a/google/cloud/aiplatform_v1/types/model.py +++ b/google/cloud/aiplatform_v1/types/model.py @@ -50,13 +50,13 @@ class Model(proto.Message): that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by AI - Platform, if no additional metadata is needed, this field is - set to an empty string. Note: The URI given on output will - be immutable and probably different, including the URI - scheme, than the one given on input. The output URI will - point to a location where the user only has a read access. + Object `__. + AutoML Models always have this field populated by Vertex AI, + if no additional metadata is needed, this field is set to an + empty string. Note: The URI given on output will be + immutable and probably different, including the URI scheme, + than the one given on input. The output URI will point to a + location where the user only has a read access. metadata (google.protobuf.struct_pb2.Value): Immutable. An additional information about the Model; the schema of the metadata can be found in @@ -76,7 +76,7 @@ class Model(proto.Message): ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], and all binaries it contains are copied and stored - internally by AI Platform. Not present for AutoML Models. + internally by Vertex AI. Not present for AutoML Models. artifact_uri (str): Immutable. The path to the directory containing the Model artifact and any of its @@ -181,7 +181,7 @@ class Model(proto.Message): or [PredictionService.Explain][]. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this Model was - uploaded into AI Platform. + uploaded into Vertex AI. update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this Model was most recently updated. @@ -307,12 +307,12 @@ class PredictSchemata(proto.Message): [ExplainRequest.instances][] and [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by AI - Platform. Note: The URI given on output will be immutable - and probably different, including the URI scheme, than the - one given on input. The output URI will point to a location - where the user only has a read access. + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. parameters_schema_uri (str): Immutable. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and @@ -321,13 +321,13 @@ class PredictSchemata(proto.Message): [ExplainRequest.parameters][] and [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1.BatchPredictionJob.model_parameters]. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by AI - Platform, if no parameters are supported, then it is set to - an empty string. 
Note: The URI given on output will be - immutable and probably different, including the URI scheme, - than the one given on input. The output URI will point to a - location where the user only has a read access. + Object `__. + AutoML Models always have this field populated by Vertex AI, + if no parameters are supported, then it is set to an empty + string. Note: The URI given on output will be immutable and + probably different, including the URI scheme, than the one + given on input. The output URI will point to a location + where the user only has a read access. prediction_schema_uri (str): Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction @@ -336,12 +336,12 @@ class PredictSchemata(proto.Message): [ExplainResponse.explanations][], and [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by AI - Platform. Note: The URI given on output will be immutable - and probably different, including the URI scheme, than the - one given on input. The output URI will point to a location - where the user only has a read access. + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. """ instance_schema_uri = proto.Field(proto.STRING, number=1,) @@ -350,19 +350,20 @@ class PredictSchemata(proto.Message): class ModelContainerSpec(proto.Message): - r"""Specification of a container for serving predictions. This message - is a subset of the Kubernetes Container v1 core - `specification `__. + r"""Specification of a container for serving predictions. Some fields in + this message correspond to fields in the `Kubernetes Container v1 + core + specification `__. Attributes: image_uri (str): Required. Immutable. URI of the Docker image to be used as the custom container for serving predictions. This URI must identify an image in Artifact Registry or Container - Registry. Learn more about the container publishing - requirements, including permissions requirements for the AI - Platform Service Agent, - `here `__. + Registry. Learn more about the `container publishing + requirements `__, + including permissions requirements for the AI Platform + Service Agent. The container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], @@ -371,7 +372,12 @@ class ModelContainerSpec(proto.Message): To learn about the requirements for the Docker image itself, see `Custom container - requirements `__. + requirements `__. + + You can use the URI to one of Vertex AI's `pre-built + container images for + prediction `__ + in this field. command (Sequence[str]): Immutable. Specifies the command that runs when the container starts. This overrides the container's @@ -387,20 +393,20 @@ class ModelContainerSpec(proto.Message): ```CMD`` `__, if either exists. If this field is not specified and the container does not have an ``ENTRYPOINT``, then refer to the - Docker documentation about how ``CMD`` and ``ENTRYPOINT`` - `interact `__. + Docker documentation about `how ``CMD`` and ``ENTRYPOINT`` + interact `__. If you specify this field, then you can also specify the ``args`` field to provide additional arguments for this command. 
However, if you specify this field, then the container's ``CMD`` is ignored. See the `Kubernetes - documentation `__ about how - the ``command`` and ``args`` fields interact with a - container's ``ENTRYPOINT`` and ``CMD``. + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. - In this field, you can reference environment variables `set - by AI - Platform `__ + In this field, you can reference `environment variables set + by Vertex + AI `__ and environment variables set in the [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] field. You cannot reference environment variables set in the @@ -413,7 +419,7 @@ class ModelContainerSpec(proto.Message): this syntax with ``$$``; for example: $$(VARIABLE_NAME) This field corresponds to the ``command`` field of the Kubernetes Containers `v1 core - API `__. + API `__. args (Sequence[str]): Immutable. Specifies arguments for the command that runs when the container starts. This overrides the container's @@ -425,20 +431,21 @@ class ModelContainerSpec(proto.Message): [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] field, then the command from the ``command`` field runs without any additional arguments. See the `Kubernetes - documentation `__ about how - the ``command`` and ``args`` fields interact with a - container's ``ENTRYPOINT`` and ``CMD``. + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. If you don't specify this field and don't specify the ``command`` field, then the container's ```ENTRYPOINT`` `__ and ``CMD`` determine what runs based on their default - behavior. See the Docker documentation about how ``CMD`` and - ``ENTRYPOINT`` `interact `__. + behavior. See the Docker documentation about `how ``CMD`` + and ``ENTRYPOINT`` + interact `__. - In this field, you can reference environment variables `set - by AI - Platform `__ + In this field, you can reference `environment variables set + by Vertex + AI `__ and environment variables set in the [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] field. You cannot reference environment variables set in the @@ -451,7 +458,7 @@ class ModelContainerSpec(proto.Message): this syntax with ``$$``; for example: $$(VARIABLE_NAME) This field corresponds to the ``args`` field of the Kubernetes Containers `v1 core - API `__. + API `__. env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): Immutable. List of environment variables to set in the container. After the container starts running, code running @@ -484,14 +491,14 @@ class ModelContainerSpec(proto.Message): This field corresponds to the ``env`` field of the Kubernetes Containers `v1 core - API `__. + API `__. ports (Sequence[google.cloud.aiplatform_v1.types.Port]): - Immutable. List of ports to expose from the container. AI - Platform sends any prediction requests that it receives to + Immutable. List of ports to expose from the container. + Vertex AI sends any prediction requests that it receives to the first port on this list. AI Platform also sends `liveness and health - checks `__ to - this port. + checks `__ + to this port. If you do not specify this field, it defaults to following value: @@ -504,20 +511,20 @@ class ModelContainerSpec(proto.Message): } ] - AI Platform does not use ports other than the first one + Vertex AI does not use ports other than the first one listed. This field corresponds to the ``ports`` field of the Kubernetes Containers `v1 core - API `__. 
+ API `__. predict_route (str): Immutable. HTTP path on the container to send prediction - requests to. AI Platform forwards requests sent using + requests to. Vertex AI forwards requests sent using [projects.locations.endpoints.predict][google.cloud.aiplatform.v1.PredictionService.Predict] - to this path on the container's IP address and port. AI - Platform then returns the container's response in the API + to this path on the container's IP address and port. Vertex + AI then returns the container's response in the API response. - For example, if you set this field to ``/foo``, then when AI - Platform receives a prediction request, it forwards the + For example, if you set this field to ``/foo``, then when + Vertex AI receives a prediction request, it forwards the request body in a POST request to the ``/foo`` path on the port of your container specified by the first value of this ``ModelContainerSpec``'s @@ -532,28 +539,28 @@ class ModelContainerSpec(proto.Message): - ENDPOINT: The last segment (following ``endpoints/``)of the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (AI Platform makes this value + Model has been deployed. (Vertex AI makes this value available to your container code as the - ```AIP_ENDPOINT_ID`` `__ - environment variable.) + ```AIP_ENDPOINT_ID`` environment + variable `__.) - DEPLOYED_MODEL: [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] - of the ``DeployedModel``. (AI Platform makes this value + of the ``DeployedModel``. (Vertex AI makes this value available to your container code as the ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) + variable `__.) health_route (str): Immutable. HTTP path on the container to send health checks - to. AI Platform intermittently sends GET requests to this - path on the container's IP address and port to check that - the container is healthy. Read more about `health - checks `__. - - For example, if you set this field to ``/bar``, then AI - Platform intermittently sends a GET request to the ``/bar`` - path on the port of your container specified by the first - value of this ``ModelContainerSpec``'s + to. Vertex AI intermittently sends GET requests to this path + on the container's IP address and port to check that the + container is healthy. Read more about `health + checks `__. + + For example, if you set this field to ``/bar``, then Vertex + AI intermittently sends a GET request to the ``/bar`` path + on the port of your container specified by the first value + of this ``ModelContainerSpec``'s [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] field. @@ -565,17 +572,17 @@ class ModelContainerSpec(proto.Message): - ENDPOINT: The last segment (following ``endpoints/``)of the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (AI Platform makes this value + Model has been deployed. (Vertex AI makes this value available to your container code as the - ```AIP_ENDPOINT_ID`` `__ - environment variable.) + ```AIP_ENDPOINT_ID`` environment + variable `__.) - DEPLOYED_MODEL: [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] - of the ``DeployedModel``. (AI Platform makes this value + of the ``DeployedModel``. (Vertex AI makes this value available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` `__ - environment variable.) + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) 
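Pulling the container fields above together, a minimal serving-container sketch (the image URI, routes, and port are placeholders taken from the examples in the docstring; $(AIP_STORAGE_URI) shows the variable-substitution syntax described above):

from google.cloud import aiplatform_v1

container_spec = aiplatform_v1.ModelContainerSpec(
    image_uri="us-docker.pkg.dev/my-project/my-repo/my-server:latest",
    command=["python3", "server.py"],
    # $(AIP_STORAGE_URI) is substituted by Vertex AI before the container starts.
    args=["--model-dir=$(AIP_STORAGE_URI)"],
    env=[aiplatform_v1.EnvVar(name="LOG_LEVEL", value="info")],
    ports=[aiplatform_v1.Port(container_port=8080)],  # only the first port is used
    predict_route="/foo",
    health_route="/bar",
)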
""" image_uri = proto.Field(proto.STRING, number=1,) diff --git a/google/cloud/aiplatform_v1/types/model_evaluation.py b/google/cloud/aiplatform_v1/types/model_evaluation.py index 51c11de4f2..c347f71bd3 100644 --- a/google/cloud/aiplatform_v1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1/types/model_evaluation.py @@ -39,7 +39,7 @@ class ModelEvaluation(proto.Message): [metrics][google.cloud.aiplatform.v1.ModelEvaluation.metrics] of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. + Object `__. metrics (google.protobuf.struct_pb2.Value): Output only. Evaluation metrics of the Model. The schema of the metrics is stored in diff --git a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py index 4edf4acac3..f85b35a314 100644 --- a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py +++ b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py @@ -42,7 +42,7 @@ class ModelEvaluationSlice(proto.Message): [metrics][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics] of this ModelEvaluationSlice. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. + Object `__. metrics (google.protobuf.struct_pb2.Value): Output only. Sliced evaluation metrics of the Model. The schema of the metrics is stored in diff --git a/google/cloud/aiplatform_v1/types/model_service.py b/google/cloud/aiplatform_v1/types/model_service.py index aca2817dbe..f315436f5f 100644 --- a/google/cloud/aiplatform_v1/types/model_service.py +++ b/google/cloud/aiplatform_v1/types/model_service.py @@ -197,7 +197,7 @@ class UpdateModelRequest(proto.Message): update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. """ model = proto.Field(proto.MESSAGE, number=1, message=gca_model.Model,) @@ -326,7 +326,6 @@ class GetModelEvaluationRequest(proto.Message): Attributes: name (str): Required. The name of the ModelEvaluation resource. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` """ @@ -395,7 +394,6 @@ class GetModelEvaluationSliceRequest(proto.Message): name (str): Required. The name of the ModelEvaluationSlice resource. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` """ @@ -410,7 +408,6 @@ class ListModelEvaluationSlicesRequest(proto.Message): parent (str): Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` filter (str): The standard list filter. diff --git a/google/cloud/aiplatform_v1/types/pipeline_service.py b/google/cloud/aiplatform_v1/types/pipeline_service.py index 0f659285e2..87681a47e6 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -58,7 +58,6 @@ class GetTrainingPipelineRequest(proto.Message): Attributes: name (str): Required. The name of the TrainingPipeline resource. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` """ @@ -141,7 +140,6 @@ class DeleteTrainingPipelineRequest(proto.Message): name (str): Required. The name of the TrainingPipeline resource to be deleted. 
Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` """ @@ -156,7 +154,6 @@ class CancelTrainingPipelineRequest(proto.Message): name (str): Required. The name of the TrainingPipeline to cancel. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` """ diff --git a/google/cloud/aiplatform_v1/types/specialist_pool_service.py b/google/cloud/aiplatform_v1/types/specialist_pool_service.py index d7b5f33f75..78f44207ff 100644 --- a/google/cloud/aiplatform_v1/types/specialist_pool_service.py +++ b/google/cloud/aiplatform_v1/types/specialist_pool_service.py @@ -76,7 +76,6 @@ class GetSpecialistPoolRequest(proto.Message): name (str): Required. The name of the SpecialistPool resource. The form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. """ @@ -181,7 +180,6 @@ class UpdateSpecialistPoolOperationMetadata(proto.Message): specialist_pool (str): Output only. The name of the SpecialistPool to which the specialists are being added. Format: - ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`` generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): The operation generic information. diff --git a/google/cloud/aiplatform_v1/types/training_pipeline.py b/google/cloud/aiplatform_v1/types/training_pipeline.py index acd18f7b2e..52acbcb2d2 100644 --- a/google/cloud/aiplatform_v1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1/types/training_pipeline.py @@ -40,9 +40,9 @@ class TrainingPipeline(proto.Message): r"""The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may also - export data from AI Platform's Dataset which becomes the training + export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to AI Platform, and evaluate the Model. + the Model to Vertex AI, and evaluate the Model. Attributes: name (str): @@ -52,7 +52,7 @@ class TrainingPipeline(proto.Message): Required. The user-defined name of this TrainingPipeline. input_data_config (google.cloud.aiplatform_v1.types.InputDataConfig): - Specifies AI Platform owned input data that may be used for + Specifies Vertex AI owned input data that may be used for training the Model. The TrainingPipeline's [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] should make clear whether this config is used and if there @@ -104,10 +104,10 @@ class TrainingPipeline(proto.Message): does not support uploading a Model as part of the pipeline. When the Pipeline's state becomes ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been - uploaded into AI Platform, then the model_to_upload's - resource [name][google.cloud.aiplatform.v1.Model.name] is - populated. The Model is always uploaded into the Project and - Location in which this pipeline is. + uploaded into Vertex AI, then the model_to_upload's resource + [name][google.cloud.aiplatform.v1.Model.name] is populated. + The Model is always uploaded into the Project and Location + in which this pipeline is. state (google.cloud.aiplatform_v1.types.PipelineState): Output only. The detailed state of the pipeline. @@ -175,8 +175,8 @@ class TrainingPipeline(proto.Message): class InputDataConfig(proto.Message): - r"""Specifies AI Platform owned input data to be used for - training, and possibly evaluating, the Model. 
+ r"""Specifies Vertex AI owned input data to be used for training, + and possibly evaluating, the Model. Attributes: fraction_split (google.cloud.aiplatform_v1.types.FractionSplit): @@ -201,24 +201,22 @@ class InputDataConfig(proto.Message): format. All training input data is written into that directory. - The AI Platform environment variables representing Cloud + The Vertex AI environment variables representing Cloud Storage data URIs are represented in the Cloud Storage wildcard format to support sharded data. e.g.: "gs://.../training-*.jsonl" - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data - - AIP_TRAINING_DATA_URI = - "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" + - AIP_TRAINING_DATA_URI = + "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" - AIP_VALIDATION_DATA_URI = - - "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" + "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" - AIP_TEST_DATA_URI = - - "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}". + "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}". bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): Only applicable to custom training with tabular Dataset with BigQuery source. @@ -233,13 +231,12 @@ class InputDataConfig(proto.Message): ``validation`` and ``test``. - AIP_DATA_FORMAT = "bigquery". - - AIP_TRAINING_DATA_URI = - "bigquery_destination.dataset\_\ **\ .training" + - AIP_TRAINING_DATA_URI = + "bigquery_destination.dataset\_\ **\ .training" - AIP_VALIDATION_DATA_URI = - - "bigquery_destination.dataset\_\ **\ .validation" + "bigquery_destination.dataset\_\ **\ .validation" - AIP_TEST_DATA_URI = "bigquery_destination.dataset\_\ **\ .test". @@ -261,8 +258,8 @@ class InputDataConfig(proto.Message): ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on (for the auto-assigned that role is - decided by AI Platform). A filter with same syntax as the - one used in + decided by Vertex AI). A filter with same syntax as the one + used in [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations] may be used, but note here it filters across all Annotations of the Dataset, and not just within a single DataItem. @@ -273,8 +270,8 @@ class InputDataConfig(proto.Message): Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. The - schema files that can be used here are found in + Object `__. + The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the chosen schema must be consistent with [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] @@ -322,7 +319,7 @@ class FractionSplit(proto.Message): the given fractions. Any of ``training_fraction``, ``validation_fraction`` and ``test_fraction`` may optionally be provided, they must sum to up to 1. If the provided ones sum to less - than 1, the remainder is assigned to sets as decided by AI Platform. + than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test. 
diff --git a/google/cloud/aiplatform_v1/types/user_action_reference.py b/google/cloud/aiplatform_v1/types/user_action_reference.py index 203b604ae2..27f0bcdf92 100644 --- a/google/cloud/aiplatform_v1/types/user_action_reference.py +++ b/google/cloud/aiplatform_v1/types/user_action_reference.py @@ -35,11 +35,11 @@ class UserActionReference(proto.Message): data_labeling_job (str): For API calls that start a LabelingJob. Resource name of the LabelingJob. Format: - 'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}' method (str): - The method name of the API call. For example, - "/google.cloud.aiplatform.v1alpha1.DatasetService.CreateDataset". + The method name of the API RPC call. For + example, + "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset". """ operation = proto.Field(proto.STRING, number=1, oneof="reference",) diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index 1ef85adee3..39119b99fd 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -91,7 +91,8 @@ class DatasetServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -106,7 +107,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -123,7 +124,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> DatasetServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: DatasetServiceTransport: The transport used by the client instance. @@ -142,7 +143,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the dataset service client. + """Instantiates the dataset service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -369,7 +370,7 @@ async def update_dataset( update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - ``display_name`` diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py index 140646f861..8582d349eb 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -65,7 +65,7 @@ class DatasetServiceClientMeta(type): _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[DatasetServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. 
If none is @@ -88,7 +88,8 @@ class DatasetServiceClient(metaclass=DatasetServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -122,7 +123,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -139,7 +141,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -158,10 +160,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> DatasetServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - DatasetServiceTransport: The transport used by the client instance. + DatasetServiceTransport: The transport used by the client + instance. """ return self._transport @@ -169,7 +172,7 @@ def transport(self) -> DatasetServiceTransport: def annotation_path( project: str, location: str, dataset: str, data_item: str, annotation: str, ) -> str: - """Return a fully-qualified annotation string.""" + """Returns a fully-qualified annotation string.""" return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format( project=project, location=location, @@ -180,7 +183,7 @@ def annotation_path( @staticmethod def parse_annotation_path(path: str) -> Dict[str, str]: - """Parse a annotation path into its component segments.""" + """Parses a annotation path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)/annotations/(?P.+?)$", path, @@ -191,7 +194,7 @@ def parse_annotation_path(path: str) -> Dict[str, str]: def annotation_spec_path( project: str, location: str, dataset: str, annotation_spec: str, ) -> str: - """Return a fully-qualified annotation_spec string.""" + """Returns a fully-qualified annotation_spec string.""" return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format( project=project, location=location, @@ -201,7 +204,7 @@ def annotation_spec_path( @staticmethod def parse_annotation_spec_path(path: str) -> Dict[str, str]: - """Parse a annotation_spec path into its component segments.""" + """Parses a annotation_spec path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$", path, @@ -212,14 +215,14 @@ def parse_annotation_spec_path(path: str) -> Dict[str, str]: def data_item_path( project: str, location: str, dataset: str, data_item: str, ) -> str: - """Return a fully-qualified data_item string.""" + """Returns a fully-qualified data_item string.""" return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format( project=project, location=location, dataset=dataset, data_item=data_item, ) @staticmethod def 
parse_data_item_path(path: str) -> Dict[str, str]: - """Parse a data_item path into its component segments.""" + """Parses a data_item path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)$", path, @@ -228,14 +231,14 @@ def parse_data_item_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path(project: str, location: str, dataset: str,) -> str: - """Return a fully-qualified dataset string.""" + """Returns a fully-qualified dataset string.""" return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: - """Parse a dataset path into its component segments.""" + """Parses a dataset path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path, @@ -244,7 +247,7 @@ def parse_dataset_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -257,7 +260,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -268,7 +271,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -279,7 +282,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -290,7 +293,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -309,7 +312,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the dataset service client. + """Instantiates the dataset service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -364,9 +367,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. 
if client_options.api_endpoint is not None: @@ -378,12 +382,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -398,8 +404,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -602,7 +608,7 @@ def update_dataset( update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - ``display_name`` diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py index f57caea0a6..192407a28a 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py @@ -118,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -246,7 +246,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -374,7 +374,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index fc38381f98..77d2abab83 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -81,7 +81,8 @@ class EndpointServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -96,7 +97,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -113,7 +114,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> EndpointServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: EndpointServiceTransport: The transport used by the client instance. 
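The GOOGLE_API_USE_MTLS_ENDPOINT branch rewritten above reduces to a small decision table. As a standalone sketch (the function name and parameters are illustrative, and a plain ValueError stands in for the client's MutualTLSChannelError):

import os


def select_api_endpoint(default: str, default_mtls: str, is_mtls: bool) -> str:
    """Mirrors the endpoint-selection logic shown in the hunks above."""
    use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    if use_mtls_env == "never":
        return default
    if use_mtls_env == "always":
        return default_mtls
    if use_mtls_env == "auto":
        # "auto" upgrades to mTLS only when a client certificate is found.
        return default_mtls if is_mtls else default
    raise ValueError(
        "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
        "values: never, auto, always"
    )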
@@ -132,7 +133,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the endpoint service client. + """Instantiates the endpoint service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -439,7 +440,7 @@ async def update_endpoint( should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The update mask applies to the resource. See - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py index 2b9b6b1146..6a5371b98b 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -61,7 +61,7 @@ class EndpointServiceClientMeta(type): _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[EndpointServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -84,7 +84,8 @@ class EndpointServiceClient(metaclass=EndpointServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -118,7 +119,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -135,7 +137,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -154,23 +156,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> EndpointServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - EndpointServiceTransport: The transport used by the client instance. + EndpointServiceTransport: The transport used by the client + instance. 
""" return self._transport @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: - """Return a fully-qualified endpoint string.""" + """Returns a fully-qualified endpoint string.""" return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) @staticmethod def parse_endpoint_path(path: str) -> Dict[str, str]: - """Parse a endpoint path into its component segments.""" + """Parses a endpoint path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path, @@ -179,14 +182,14 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path, @@ -195,7 +198,7 @@ def parse_model_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -208,7 +211,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -219,7 +222,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -230,7 +233,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -241,7 +244,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -260,7 +263,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the endpoint service client. + """Instantiates the endpoint service client. 
Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -315,9 +318,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -329,12 +333,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -349,8 +355,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -633,7 +639,7 @@ def update_endpoint( should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. See - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py index b16d1cf1a1..5e12e63dd2 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py @@ -116,7 +116,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py index 270ad60e8f..b01ae05e3e 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py @@ -84,7 +84,8 @@ class FeaturestoreOnlineServingServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -99,7 +100,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. 
Args: filename (str): The path to the service account private key json @@ -116,7 +117,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> FeaturestoreOnlineServingServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: FeaturestoreOnlineServingServiceTransport: The transport used by the client instance. @@ -138,7 +139,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the featurestore online serving service client. + """Instantiates the featurestore online serving service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py index 9389db9195..322e19e043 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py @@ -60,7 +60,7 @@ class FeaturestoreOnlineServingServiceClientMeta(type): def get_transport_class( cls, label: str = None, ) -> Type[FeaturestoreOnlineServingServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -85,7 +85,8 @@ class FeaturestoreOnlineServingServiceClient( @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -119,7 +120,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -136,7 +138,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -155,10 +157,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> FeaturestoreOnlineServingServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - FeaturestoreOnlineServingServiceTransport: The transport used by the client instance. + FeaturestoreOnlineServingServiceTransport: The transport used by the client + instance. 
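The two from_service_account_* constructors reflowed in these hunks differ only in how the key material arrives; a usage sketch in which "service-account.json" is a placeholder path:

import json

from google.cloud import aiplatform_v1beta1

# From a key file on disk...
client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.from_service_account_file(
    "service-account.json"
)

# ...or from the already-parsed key info.
with open("service-account.json") as f:
    info = json.load(f)
client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.from_service_account_info(
    info
)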
""" return self._transport @@ -166,7 +169,7 @@ def transport(self) -> FeaturestoreOnlineServingServiceTransport: def entity_type_path( project: str, location: str, featurestore: str, entity_type: str, ) -> str: - """Return a fully-qualified entity_type string.""" + """Returns a fully-qualified entity_type string.""" return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( project=project, location=location, @@ -176,7 +179,7 @@ def entity_type_path( @staticmethod def parse_entity_type_path(path: str) -> Dict[str, str]: - """Parse a entity_type path into its component segments.""" + """Parses a entity_type path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path, @@ -185,7 +188,7 @@ def parse_entity_type_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -198,7 +201,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -209,7 +212,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -220,7 +223,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -231,7 +234,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -250,7 +253,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the featurestore online serving service client. + """Instantiates the featurestore online serving service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -305,9 +308,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. 
if client_options.api_endpoint is not None: @@ -319,12 +323,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -339,8 +345,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py index 6ab5b3cc7f..97a11cfbed 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -94,7 +94,8 @@ class FeaturestoreServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -109,7 +110,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -126,7 +127,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> FeaturestoreServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: FeaturestoreServiceTransport: The transport used by the client instance. @@ -146,7 +147,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the featurestore service client. + """Instantiates the featurestore service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -467,10 +468,8 @@ async def update_featurestore( Updatable fields: - - ``display_name`` - ``labels`` - ``online_serving_config.fixed_node_count`` - - ``retention_policy.online_storage_ttl_days`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py index 97d26b0d3f..5e1597a5b0 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py @@ -68,7 +68,7 @@ class FeaturestoreServiceClientMeta(type): def get_transport_class( cls, label: str = None, ) -> Type[FeaturestoreServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. 
Args: label: The name of the desired transport. If none is @@ -93,7 +93,8 @@ class FeaturestoreServiceClient(metaclass=FeaturestoreServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -127,7 +128,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -144,7 +146,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -163,10 +165,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> FeaturestoreServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - FeaturestoreServiceTransport: The transport used by the client instance. + FeaturestoreServiceTransport: The transport used by the client + instance. """ return self._transport @@ -174,7 +177,7 @@ def transport(self) -> FeaturestoreServiceTransport: def entity_type_path( project: str, location: str, featurestore: str, entity_type: str, ) -> str: - """Return a fully-qualified entity_type string.""" + """Returns a fully-qualified entity_type string.""" return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( project=project, location=location, @@ -184,7 +187,7 @@ def entity_type_path( @staticmethod def parse_entity_type_path(path: str) -> Dict[str, str]: - """Parse a entity_type path into its component segments.""" + """Parses a entity_type path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path, @@ -195,7 +198,7 @@ def parse_entity_type_path(path: str) -> Dict[str, str]: def feature_path( project: str, location: str, featurestore: str, entity_type: str, feature: str, ) -> str: - """Return a fully-qualified feature string.""" + """Returns a fully-qualified feature string.""" return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format( project=project, location=location, @@ -206,7 +209,7 @@ def feature_path( @staticmethod def parse_feature_path(path: str) -> Dict[str, str]: - """Parse a feature path into its component segments.""" + """Parses a feature path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)/features/(?P.+?)$", path, @@ -215,14 +218,14 @@ def parse_feature_path(path: str) -> Dict[str, str]: @staticmethod def featurestore_path(project: str, location: str, featurestore: str,) -> str: - """Return a fully-qualified featurestore string.""" + """Returns a fully-qualified featurestore string.""" return "projects/{project}/locations/{location}/featurestores/{featurestore}".format( project=project, location=location, 
featurestore=featurestore, ) @staticmethod def parse_featurestore_path(path: str) -> Dict[str, str]: - """Parse a featurestore path into its component segments.""" + """Parses a featurestore path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)$", path, @@ -231,7 +234,7 @@ def parse_featurestore_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -244,7 +247,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -255,7 +258,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -266,7 +269,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -277,7 +280,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -296,7 +299,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the featurestore service client. + """Instantiates the featurestore service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -351,9 +354,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -365,12 +369,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -385,8 +391,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." 
+ "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -683,10 +689,8 @@ def update_featurestore( Updatable fields: - - ``display_name`` - ``labels`` - ``online_serving_config.fixed_node_count`` - - ``retention_policy.online_storage_ttl_days`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py index 26ba8e31d2..71e3d991c6 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py @@ -120,7 +120,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -250,7 +250,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -380,7 +380,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -508,7 +508,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py index ef4ee399c0..0e72443765 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -42,7 +42,7 @@ class IndexEndpointServiceAsyncClient: - """A service for managing AI Platform's IndexEndpoints.""" + """A service for managing Vertex AI's IndexEndpoints.""" _client: IndexEndpointServiceClient @@ -82,7 +82,8 @@ class IndexEndpointServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -97,7 +98,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -114,7 +115,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> IndexEndpointServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: IndexEndpointServiceTransport: The transport used by the client instance. @@ -134,7 +135,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the index endpoint service client. + """Instantiates the index endpoint service client. 
Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -444,7 +445,7 @@ async def update_index_endpoint( should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The update mask applies to the resource. See - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py index 6d37fc13fd..2952bbd94b 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py @@ -62,7 +62,7 @@ class IndexEndpointServiceClientMeta(type): def get_transport_class( cls, label: str = None, ) -> Type[IndexEndpointServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -81,11 +81,12 @@ def get_transport_class( class IndexEndpointServiceClient(metaclass=IndexEndpointServiceClientMeta): - """A service for managing AI Platform's IndexEndpoints.""" + """A service for managing Vertex AI's IndexEndpoints.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -119,7 +120,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -136,7 +138,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -155,23 +157,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> IndexEndpointServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - IndexEndpointServiceTransport: The transport used by the client instance. + IndexEndpointServiceTransport: The transport used by the client + instance. 
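The *_path builders and parse_*_path parsers renamed across these client files (including the index_endpoint pair in the next hunk) are pure string helpers and need no credentials. A usage sketch with placeholder identifiers:

from google.cloud import aiplatform_v1beta1

client_cls = aiplatform_v1beta1.IndexEndpointServiceClient

# Build a fully-qualified resource name from its segments (placeholders).
name = client_cls.index_endpoint_path(
    project="my-project", location="us-central1", index_endpoint="1234",
)

# Recover the segments; an empty dict is returned when the path does not
# match the expected pattern.
segments = client_cls.parse_index_endpoint_path(name)
assert segments["index_endpoint"] == "1234"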
""" return self._transport @staticmethod def index_path(project: str, location: str, index: str,) -> str: - """Return a fully-qualified index string.""" + """Returns a fully-qualified index string.""" return "projects/{project}/locations/{location}/indexes/{index}".format( project=project, location=location, index=index, ) @staticmethod def parse_index_path(path: str) -> Dict[str, str]: - """Parse a index path into its component segments.""" + """Parses a index path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/indexes/(?P.+?)$", path, @@ -180,14 +183,14 @@ def parse_index_path(path: str) -> Dict[str, str]: @staticmethod def index_endpoint_path(project: str, location: str, index_endpoint: str,) -> str: - """Return a fully-qualified index_endpoint string.""" + """Returns a fully-qualified index_endpoint string.""" return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format( project=project, location=location, index_endpoint=index_endpoint, ) @staticmethod def parse_index_endpoint_path(path: str) -> Dict[str, str]: - """Parse a index_endpoint path into its component segments.""" + """Parses a index_endpoint path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/indexEndpoints/(?P.+?)$", path, @@ -196,7 +199,7 @@ def parse_index_endpoint_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -209,7 +212,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -220,7 +223,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -231,7 +234,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -242,7 +245,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -261,7 +264,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the index endpoint service client. + """Instantiates the index endpoint service client. 
Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -316,9 +319,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -330,12 +334,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -350,8 +356,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -637,7 +643,7 @@ def update_index_endpoint( should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. See - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py index f85618275b..36133f390d 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py @@ -118,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py index 7bd67d7b25..5b72ff53be 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py @@ -35,7 +35,7 @@ class IndexEndpointServiceGrpcTransport(IndexEndpointServiceTransport): """gRPC backend transport for IndexEndpointService. - A service for managing AI Platform's IndexEndpoints. + A service for managing Vertex AI's IndexEndpoints. 
This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py index 81891f2497..dd4b2a1b26 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -37,7 +37,7 @@ class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport): """gRPC AsyncIO backend transport for IndexEndpointService. - A service for managing AI Platform's IndexEndpoints. + A service for managing Vertex AI's IndexEndpoints. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py index 6dc98adec8..6118480843 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py @@ -44,7 +44,7 @@ class IndexServiceAsyncClient: - """A service for creating and managing AI Platform's Index + """A service for creating and managing Vertex AI's Index resources. """ @@ -82,7 +82,8 @@ class IndexServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -97,7 +98,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -114,7 +115,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> IndexServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: IndexServiceTransport: The transport used by the client instance. @@ -133,7 +134,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the index service client. + """Instantiates the index service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -443,7 +444,7 @@ async def update_index( update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
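The FieldMask cross-reference rewritten above is what drives partial updates in update_index. A minimal sketch of a rename that touches only display_name; the client assumes application default credentials, and the resource names are placeholders:

from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceClient
from google.cloud.aiplatform_v1beta1.types import Index
from google.protobuf import field_mask_pb2

client = IndexServiceClient()
index = Index(
    name=IndexServiceClient.index_path("my-project", "us-central1", "my-index"),
    display_name="renamed-index",
)
# Only fields listed in the mask are written; everything else is left as-is.
lro = client.update_index(
    index=index,
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
)
updated_index = lro.result()  # update_index is a long-running operation
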
This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_service/client.py index d30489ea3f..b5894ff8d6 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/client.py @@ -60,7 +60,7 @@ class IndexServiceClientMeta(type): _transport_registry["grpc_asyncio"] = IndexServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[IndexServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -79,13 +79,14 @@ def get_transport_class(cls, label: str = None,) -> Type[IndexServiceTransport]: class IndexServiceClient(metaclass=IndexServiceClientMeta): - """A service for creating and managing AI Platform's Index + """A service for creating and managing Vertex AI's Index resources. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -119,7 +120,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -136,7 +138,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -155,23 +157,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> IndexServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - IndexServiceTransport: The transport used by the client instance. + IndexServiceTransport: The transport used by the client + instance. 
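The two constructors whose docstrings are reflowed in this hunk differ only in how the service account key is supplied. A sketch with a placeholder key file:

import json

from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceClient

# From a key file on disk:
client = IndexServiceClient.from_service_account_file("sa-key.json")

# From the already-parsed dict form of the same key:
with open("sa-key.json") as f:
    client = IndexServiceClient.from_service_account_info(json.load(f))
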
""" return self._transport @staticmethod def index_path(project: str, location: str, index: str,) -> str: - """Return a fully-qualified index string.""" + """Returns a fully-qualified index string.""" return "projects/{project}/locations/{location}/indexes/{index}".format( project=project, location=location, index=index, ) @staticmethod def parse_index_path(path: str) -> Dict[str, str]: - """Parse a index path into its component segments.""" + """Parses a index path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/indexes/(?P.+?)$", path, @@ -180,14 +183,14 @@ def parse_index_path(path: str) -> Dict[str, str]: @staticmethod def index_endpoint_path(project: str, location: str, index_endpoint: str,) -> str: - """Return a fully-qualified index_endpoint string.""" + """Returns a fully-qualified index_endpoint string.""" return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format( project=project, location=location, index_endpoint=index_endpoint, ) @staticmethod def parse_index_endpoint_path(path: str) -> Dict[str, str]: - """Parse a index_endpoint path into its component segments.""" + """Parses a index_endpoint path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/indexEndpoints/(?P.+?)$", path, @@ -196,7 +199,7 @@ def parse_index_endpoint_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -209,7 +212,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -220,7 +223,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -231,7 +234,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -242,7 +245,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -261,7 +264,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the index service client. + """Instantiates the index service client. 
Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -316,9 +319,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -330,12 +334,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -350,8 +356,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -637,7 +643,7 @@ def update_index( update_mask (google.protobuf.field_mask_pb2.FieldMask): The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py index 010745adb4..06a2965f58 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py @@ -116,7 +116,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py index 9178f1d61a..e07b96d5bd 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py @@ -34,7 +34,7 @@ class IndexServiceGrpcTransport(IndexServiceTransport): """gRPC backend transport for IndexService. - A service for creating and managing AI Platform's Index + A service for creating and managing Vertex AI's Index resources. This class defines the same methods as the primary client, so the diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py index c17e033b7c..d61d6c4912 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py @@ -36,7 +36,7 @@ class IndexServiceGrpcAsyncIOTransport(IndexServiceTransport): """gRPC AsyncIO backend transport for IndexService. 
- A service for creating and managing AI Platform's Index + A service for creating and managing Vertex AI's Index resources. This class defines the same methods as the primary client, so the diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index 0396f4db64..ff7f15bd83 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -71,7 +71,7 @@ class JobServiceAsyncClient: - """A service for creating and managing AI Platform's jobs.""" + """A service for creating and managing Vertex AI's jobs.""" _client: JobServiceClient @@ -133,7 +133,8 @@ class JobServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -148,7 +149,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -165,7 +166,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> JobServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: JobServiceTransport: The transport used by the client instance. @@ -184,7 +185,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the job service client. + """Instantiates the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/google/cloud/aiplatform_v1beta1/services/job_service/client.py index c802dfd25a..53ba235acd 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -87,7 +87,7 @@ class JobServiceClientMeta(type): _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -106,11 +106,12 @@ def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: class JobServiceClient(metaclass=JobServiceClientMeta): - """A service for creating and managing AI Platform's jobs.""" + """A service for creating and managing Vertex AI's jobs.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -144,7 +145,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. 
+ """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -161,7 +163,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -180,10 +182,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> JobServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - JobServiceTransport: The transport used by the client instance. + JobServiceTransport: The transport used by the client + instance. """ return self._transport @@ -191,7 +194,7 @@ def transport(self) -> JobServiceTransport: def batch_prediction_job_path( project: str, location: str, batch_prediction_job: str, ) -> str: - """Return a fully-qualified batch_prediction_job string.""" + """Returns a fully-qualified batch_prediction_job string.""" return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format( project=project, location=location, @@ -200,7 +203,7 @@ def batch_prediction_job_path( @staticmethod def parse_batch_prediction_job_path(path: str) -> Dict[str, str]: - """Parse a batch_prediction_job path into its component segments.""" + """Parses a batch_prediction_job path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/batchPredictionJobs/(?P.+?)$", path, @@ -209,14 +212,14 @@ def parse_batch_prediction_job_path(path: str) -> Dict[str, str]: @staticmethod def custom_job_path(project: str, location: str, custom_job: str,) -> str: - """Return a fully-qualified custom_job string.""" + """Returns a fully-qualified custom_job string.""" return "projects/{project}/locations/{location}/customJobs/{custom_job}".format( project=project, location=location, custom_job=custom_job, ) @staticmethod def parse_custom_job_path(path: str) -> Dict[str, str]: - """Parse a custom_job path into its component segments.""" + """Parses a custom_job path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path, @@ -227,14 +230,14 @@ def parse_custom_job_path(path: str) -> Dict[str, str]: def data_labeling_job_path( project: str, location: str, data_labeling_job: str, ) -> str: - """Return a fully-qualified data_labeling_job string.""" + """Returns a fully-qualified data_labeling_job string.""" return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format( project=project, location=location, data_labeling_job=data_labeling_job, ) @staticmethod def parse_data_labeling_job_path(path: str) -> Dict[str, str]: - """Parse a data_labeling_job path into its component segments.""" + """Parses a data_labeling_job path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/dataLabelingJobs/(?P.+?)$", path, @@ -243,14 +246,14 @@ def parse_data_labeling_job_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path(project: str, location: str, dataset: str,) -> str: - """Return a fully-qualified dataset string.""" + """Returns a fully-qualified dataset string.""" return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @staticmethod def 
parse_dataset_path(path: str) -> Dict[str, str]: - """Parse a dataset path into its component segments.""" + """Parses a dataset path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path, @@ -259,14 +262,14 @@ def parse_dataset_path(path: str) -> Dict[str, str]: @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: - """Return a fully-qualified endpoint string.""" + """Returns a fully-qualified endpoint string.""" return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) @staticmethod def parse_endpoint_path(path: str) -> Dict[str, str]: - """Parse a endpoint path into its component segments.""" + """Parses a endpoint path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path, @@ -277,7 +280,7 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: def hyperparameter_tuning_job_path( project: str, location: str, hyperparameter_tuning_job: str, ) -> str: - """Return a fully-qualified hyperparameter_tuning_job string.""" + """Returns a fully-qualified hyperparameter_tuning_job string.""" return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format( project=project, location=location, @@ -286,7 +289,7 @@ def hyperparameter_tuning_job_path( @staticmethod def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str, str]: - """Parse a hyperparameter_tuning_job path into its component segments.""" + """Parses a hyperparameter_tuning_job path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/hyperparameterTuningJobs/(?P.+?)$", path, @@ -295,14 +298,14 @@ def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path, @@ -313,7 +316,7 @@ def parse_model_path(path: str) -> Dict[str, str]: def model_deployment_monitoring_job_path( project: str, location: str, model_deployment_monitoring_job: str, ) -> str: - """Return a fully-qualified model_deployment_monitoring_job string.""" + """Returns a fully-qualified model_deployment_monitoring_job string.""" return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format( project=project, location=location, @@ -322,7 +325,7 @@ def model_deployment_monitoring_job_path( @staticmethod def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str, str]: - """Parse a model_deployment_monitoring_job path into its component segments.""" + """Parses a model_deployment_monitoring_job path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/modelDeploymentMonitoringJobs/(?P.+?)$", path, @@ -331,14 +334,14 @@ def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str, str]: @staticmethod def network_path(project: str, network: str,) -> str: - """Return a fully-qualified network string.""" + """Returns a 
fully-qualified network string.""" return "projects/{project}/global/networks/{network}".format( project=project, network=network, ) @staticmethod def parse_network_path(path: str) -> Dict[str, str]: - """Parse a network path into its component segments.""" + """Parses a network path into its component segments.""" m = re.match( r"^projects/(?P.+?)/global/networks/(?P.+?)$", path ) @@ -346,14 +349,14 @@ def parse_network_path(path: str) -> Dict[str, str]: @staticmethod def tensorboard_path(project: str, location: str, tensorboard: str,) -> str: - """Return a fully-qualified tensorboard string.""" + """Returns a fully-qualified tensorboard string.""" return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format( project=project, location=location, tensorboard=tensorboard, ) @staticmethod def parse_tensorboard_path(path: str) -> Dict[str, str]: - """Parse a tensorboard path into its component segments.""" + """Parses a tensorboard path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path, @@ -362,14 +365,14 @@ def parse_tensorboard_path(path: str) -> Dict[str, str]: @staticmethod def trial_path(project: str, location: str, study: str, trial: str,) -> str: - """Return a fully-qualified trial string.""" + """Returns a fully-qualified trial string.""" return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format( project=project, location=location, study=study, trial=trial, ) @staticmethod def parse_trial_path(path: str) -> Dict[str, str]: - """Parse a trial path into its component segments.""" + """Parses a trial path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path, @@ -378,7 +381,7 @@ def parse_trial_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -391,7 +394,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -402,7 +405,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -413,7 +416,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -424,7 +427,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -443,7 +446,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: 
gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the job service client. + """Instantiates the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -498,9 +501,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -512,12 +516,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -532,8 +538,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py index cc1d17b38b..e8229e4a89 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py @@ -123,7 +123,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -251,7 +251,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -381,7 +381,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -513,7 +513,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -654,7 +654,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -796,7 +796,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py index 797420d74a..cd6247ab97 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py @@ -52,7 +52,7 @@ class JobServiceGrpcTransport(JobServiceTransport): """gRPC backend transport for JobService. 
- A service for creating and managing AI Platform's jobs. + A service for creating and managing Vertex AI's jobs. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py index 93f5416555..056dfde986 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py @@ -54,7 +54,7 @@ class JobServiceGrpcAsyncIOTransport(JobServiceTransport): """gRPC AsyncIO backend transport for JobService. - A service for creating and managing AI Platform's jobs. + A service for creating and managing Vertex AI's jobs. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py index 18488133ce..c886cbda34 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py @@ -102,7 +102,8 @@ class MetadataServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -117,7 +118,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -134,7 +135,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> MetadataServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: MetadataServiceTransport: The transport used by the client instance. @@ -153,7 +154,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the metadata service client. + """Instantiates the metadata service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -918,7 +919,7 @@ async def create_context( context_id (:class:`str`): The {context} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. If not provided, the Context's ID will be a UUID generated by the service. Must be 4-128 characters in length. Valid characters are /[a-z][0-9]-/. Must be @@ -1340,6 +1341,8 @@ async def add_context_artifacts_and_executions( artifacts (:class:`Sequence[str]`): The resource names of the Artifacts to attribute to the Context. 
+ Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} This corresponds to the ``artifacts`` field on the ``request`` instance; if ``request`` is provided, this @@ -1348,6 +1351,9 @@ async def add_context_artifacts_and_executions( The resource names of the Executions to associate with the Context. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + This corresponds to the ``executions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1418,7 +1424,7 @@ async def add_context_children( of the child Contexts have already been added to the parent Context, they are simply skipped. If this call would create a cycle or cause any Context to have more than 10 parents, the - request will fail with INVALID_ARGUMENT error. + request will fail with an INVALID_ARGUMENT error. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest`): @@ -1426,7 +1432,8 @@ async def add_context_children( [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. context (:class:`str`): Required. The resource name of the - parent Context. Format: + parent Context. + Format: projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} This corresponds to the ``context`` field @@ -1914,10 +1921,11 @@ async def add_execution_events( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> metadata_service.AddExecutionEventsResponse: - r"""Adds Events for denoting whether each Artifact was an - input or output for a given Execution. If any Events - already exist between the Execution and any of the - specified Artifacts they are simply skipped. + r"""Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest`): @@ -2080,7 +2088,7 @@ async def create_metadata_schema( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gca_metadata_schema.MetadataSchema: - r"""Creates an MetadataSchema. + r"""Creates a MetadataSchema. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest`): diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py index 5c3a8871cc..4f4ed9360e 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py @@ -72,7 +72,7 @@ class MetadataServiceClientMeta(type): _transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[MetadataServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -95,7 +95,8 @@ class MetadataServiceClient(metaclass=MetadataServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
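A sketch of the endpoint conversion this docstring describes, not the library's exact implementation:

def to_mtls_endpoint(api_endpoint: str) -> str:
    # "*.sandbox.googleapis.com" -> "*.mtls.sandbox.googleapis.com"
    if api_endpoint.endswith(".sandbox.googleapis.com"):
        return api_endpoint.replace(
            "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
        )
    # "*.googleapis.com" -> "*.mtls.googleapis.com"
    if api_endpoint.endswith(".googleapis.com"):
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
    return api_endpoint

assert to_mtls_endpoint("aiplatform.googleapis.com") == "aiplatform.mtls.googleapis.com"
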
Args: @@ -129,7 +130,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -146,7 +148,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -165,10 +167,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> MetadataServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - MetadataServiceTransport: The transport used by the client instance. + MetadataServiceTransport: The transport used by the client + instance. """ return self._transport @@ -176,7 +179,7 @@ def transport(self) -> MetadataServiceTransport: def artifact_path( project: str, location: str, metadata_store: str, artifact: str, ) -> str: - """Return a fully-qualified artifact string.""" + """Returns a fully-qualified artifact string.""" return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( project=project, location=location, @@ -186,7 +189,7 @@ def artifact_path( @staticmethod def parse_artifact_path(path: str) -> Dict[str, str]: - """Parse a artifact path into its component segments.""" + """Parses a artifact path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/artifacts/(?P<artifact>.+?)$", path, @@ -197,7 +200,7 @@ def parse_artifact_path(path: str) -> Dict[str, str]: def context_path( project: str, location: str, metadata_store: str, context: str, ) -> str: - """Return a fully-qualified context string.""" + """Returns a fully-qualified context string.""" return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( project=project, location=location, @@ -207,7 +210,7 @@ def context_path( @staticmethod def parse_context_path(path: str) -> Dict[str, str]: - """Parse a context path into its component segments.""" + """Parses a context path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/contexts/(?P<context>.+?)$", path, @@ -218,7 +221,7 @@ def parse_context_path(path: str) -> Dict[str, str]: def execution_path( project: str, location: str, metadata_store: str, execution: str, ) -> str: - """Return a fully-qualified execution string.""" + """Returns a fully-qualified execution string.""" return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( project=project, location=location, @@ -228,7 +231,7 @@ def execution_path( @staticmethod def parse_execution_path(path: str) -> Dict[str, str]: - """Parse a execution path into its component segments.""" + """Parses a execution path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/executions/(?P<execution>.+?)$", path, @@ -239,7 +242,7 @@ def parse_execution_path(path: str) -> Dict[str, str]: def metadata_schema_path( project: str, location: str, metadata_store: str, metadata_schema: str, ) -> str: - """Return a fully-qualified metadata_schema string.""" + """Returns a fully-qualified metadata_schema string."""
return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format( project=project, location=location, @@ -249,7 +252,7 @@ def metadata_schema_path( @staticmethod def parse_metadata_schema_path(path: str) -> Dict[str, str]: - """Parse a metadata_schema path into its component segments.""" + """Parses a metadata_schema path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/metadataSchemas/(?P<metadata_schema>.+?)$", path, @@ -258,14 +261,14 @@ def parse_metadata_schema_path(path: str) -> Dict[str, str]: @staticmethod def metadata_store_path(project: str, location: str, metadata_store: str,) -> str: - """Return a fully-qualified metadata_store string.""" + """Returns a fully-qualified metadata_store string.""" return "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format( project=project, location=location, metadata_store=metadata_store, ) @staticmethod def parse_metadata_store_path(path: str) -> Dict[str, str]: - """Parse a metadata_store path into its component segments.""" + """Parses a metadata_store path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)$", path, @@ -274,7 +277,7 @@ def parse_metadata_store_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -287,7 +290,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -298,7 +301,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -309,7 +312,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -320,7 +323,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -339,7 +342,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the metadata service client. + """Instantiates the metadata service client. 
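The metadata paths in this file nest a metadata_store segment between the location and the leaf resource, so the parsed dict has four keys instead of three. A round-trip sketch; "default" is used here as a placeholder store ID:

from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient

name = MetadataServiceClient.context_path(
    "my-project", "us-central1", "default", "my-context"
)
# "projects/my-project/locations/us-central1/metadataStores/default/contexts/my-context"

MetadataServiceClient.parse_context_path(name)
# {'project': 'my-project', 'location': 'us-central1',
#  'metadata_store': 'default', 'context': 'my-context'}
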
Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -394,9 +397,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -408,12 +412,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -428,8 +434,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -1170,7 +1176,7 @@ def create_context( context_id (str): The {context} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. If not provided, the Context's ID will be a UUID generated by the service. Must be 4-128 characters in length. Valid characters are /[a-z][0-9]-/. Must be @@ -1592,6 +1598,8 @@ def add_context_artifacts_and_executions( artifacts (Sequence[str]): The resource names of the Artifacts to attribute to the Context. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} This corresponds to the ``artifacts`` field on the ``request`` instance; if ``request`` is provided, this @@ -1600,6 +1608,9 @@ def add_context_artifacts_and_executions( The resource names of the Executions to associate with the Context. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + This corresponds to the ``executions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1674,7 +1685,7 @@ def add_context_children( of the child Contexts have already been added to the parent Context, they are simply skipped. If this call would create a cycle or cause any Context to have more than 10 parents, the - request will fail with INVALID_ARGUMENT error. + request will fail with an INVALID_ARGUMENT error. Args: request (google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest): @@ -1682,7 +1693,8 @@ def add_context_children( [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. context (str): Required. The resource name of the - parent Context. Format: + parent Context. 
+ Format: projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} This corresponds to the ``context`` field @@ -2172,10 +2184,11 @@ def add_execution_events( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> metadata_service.AddExecutionEventsResponse: - r"""Adds Events for denoting whether each Artifact was an - input or output for a given Execution. If any Events - already exist between the Execution and any of the - specified Artifacts they are simply skipped. + r"""Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. Args: request (google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest): @@ -2342,7 +2355,7 @@ def create_metadata_schema( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gca_metadata_schema.MetadataSchema: - r"""Creates an MetadataSchema. + r"""Creates a MetadataSchema. Args: request (google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest): diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py index 1366829837..5c58aad925 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py @@ -120,7 +120,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -248,7 +248,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -376,7 +376,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -504,7 +504,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -632,7 +632,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py index 0ad440d8c6..aa550b0829 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py @@ -647,7 +647,7 @@ def add_context_children( of the child Contexts have already been added to the parent Context, they are simply skipped. If this call would create a cycle or cause any Context to have more than 10 parents, the - request will fail with INVALID_ARGUMENT error. + request will fail with an INVALID_ARGUMENT error. Returns: Callable[[~.AddContextChildrenRequest], @@ -816,10 +816,11 @@ def add_execution_events( ]: r"""Return a callable for the add execution events method over gRPC. - Adds Events for denoting whether each Artifact was an - input or output for a given Execution. 
If any Events - already exist between the Execution and any of the - specified Artifacts they are simply skipped. + Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. Returns: Callable[[~.AddExecutionEventsRequest], @@ -882,7 +883,7 @@ def create_metadata_schema( ]: r"""Return a callable for the create metadata schema method over gRPC. - Creates an MetadataSchema. + Creates a MetadataSchema. Returns: Callable[[~.CreateMetadataSchemaRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py index c9020e1101..119ff4d369 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py @@ -667,7 +667,7 @@ def add_context_children( of the child Contexts have already been added to the parent Context, they are simply skipped. If this call would create a cycle or cause any Context to have more than 10 parents, the - request will fail with INVALID_ARGUMENT error. + request will fail with an INVALID_ARGUMENT error. Returns: Callable[[~.AddContextChildrenRequest], @@ -842,10 +842,11 @@ def add_execution_events( ]: r"""Return a callable for the add execution events method over gRPC. - Adds Events for denoting whether each Artifact was an - input or output for a given Execution. If any Events - already exist between the Execution and any of the - specified Artifacts they are simply skipped. + Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. Returns: Callable[[~.AddExecutionEventsRequest], @@ -908,7 +909,7 @@ def create_metadata_schema( ]: r"""Return a callable for the create metadata schema method over gRPC. - Creates an MetadataSchema. + Creates a MetadataSchema. Returns: Callable[[~.CreateMetadataSchemaRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py index 5549e1c870..8ce212f958 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py @@ -38,8 +38,7 @@ class MigrationServiceAsyncClient: """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to AI - Platform. + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. """ _client: MigrationServiceClient @@ -90,7 +89,8 @@ class MigrationServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -105,7 +105,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. 
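The AddExecutionEvents rewording above pins down the semantics: an Event links an Artifact to an Execution as input or output, and re-adding an existing Event is skipped rather than failed. A hypothetical call; all resource IDs are placeholders and the client assumes application default credentials:

from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient
from google.cloud.aiplatform_v1beta1.types import Event

client = MetadataServiceClient()
execution = MetadataServiceClient.execution_path(
    "my-project", "us-central1", "default", "my-execution"
)
artifact = MetadataServiceClient.artifact_path(
    "my-project", "us-central1", "default", "my-output-artifact"
)
# Mark the artifact as an OUTPUT of the execution; a duplicate Event is a no-op.
client.add_execution_events(
    execution=execution,
    events=[Event(artifact=artifact, type_=Event.Type.OUTPUT)],
)
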
Args: filename (str): The path to the service account private key json @@ -122,7 +122,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> MigrationServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: MigrationServiceTransport: The transport used by the client instance. @@ -141,7 +141,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the migration service client. + """Instantiates the migration service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -191,7 +191,7 @@ async def search_migratable_resources( ) -> pagers.SearchMigratableResourcesAsyncPager: r"""Searches all of the resources in automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to AI Platform's + ml.googleapis.com that can be migrated to Vertex AI's given location. Args: @@ -200,7 +200,7 @@ async def search_migratable_resources( [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. parent (:class:`str`): Required. The location that the migratable resources - should be searched from. It's the AI Platform location + should be searched from. It's the Vertex AI location that the resources can be migrated to, not the resources' original location. Format: ``projects/{project}/locations/{location}`` @@ -280,7 +280,7 @@ async def batch_migrate_resources( ) -> operation_async.AsyncOperation: r"""Batch migrates resources from ml.googleapis.com, automl.googleapis.com, and datalabeling.googleapis.com - to AI Platform (Unified). + to Vertex AI. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest`): diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index 7167f186ae..3328feaab9 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -55,7 +55,7 @@ class MigrationServiceClientMeta(type): _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[MigrationServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -75,13 +75,13 @@ def get_transport_class(cls, label: str = None,) -> Type[MigrationServiceTranspo class MigrationServiceClient(metaclass=MigrationServiceClientMeta): """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to AI - Platform. + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -115,7 +115,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. 
+ """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -132,7 +133,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -151,10 +152,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> MigrationServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - MigrationServiceTransport: The transport used by the client instance. + MigrationServiceTransport: The transport used by the client + instance. """ return self._transport @@ -162,14 +164,14 @@ def transport(self) -> MigrationServiceTransport: def annotated_dataset_path( project: str, dataset: str, annotated_dataset: str, ) -> str: - """Return a fully-qualified annotated_dataset string.""" + """Returns a fully-qualified annotated_dataset string.""" return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format( project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) @staticmethod def parse_annotated_dataset_path(path: str) -> Dict[str, str]: - """Parse a annotated_dataset path into its component segments.""" + """Parses a annotated_dataset path into its component segments.""" m = re.match( r"^projects/(?P.+?)/datasets/(?P.+?)/annotatedDatasets/(?P.+?)$", path, @@ -178,14 +180,14 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path(project: str, location: str, dataset: str,) -> str: - """Return a fully-qualified dataset string.""" + """Returns a fully-qualified dataset string.""" return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: - """Parse a dataset path into its component segments.""" + """Parses a dataset path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path, @@ -194,27 +196,27 @@ def parse_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path(project: str, dataset: str,) -> str: - """Return a fully-qualified dataset string.""" + """Returns a fully-qualified dataset string.""" return "projects/{project}/datasets/{dataset}".format( project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: - """Parse a dataset path into its component segments.""" + """Parses a dataset path into its component segments.""" m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod def dataset_path(project: str, location: str, dataset: str,) -> str: - """Return a fully-qualified dataset string.""" + """Returns a fully-qualified dataset string.""" return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: - """Parse a dataset path into its component segments.""" + """Parses a dataset path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path, @@ -223,14 +225,14 @@ def parse_dataset_path(path: str) -> 
Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path, @@ -239,14 +241,14 @@ def parse_model_path(path: str) -> Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path, @@ -255,14 +257,14 @@ def parse_model_path(path: str) -> Dict[str, str]: @staticmethod def version_path(project: str, model: str, version: str,) -> str: - """Return a fully-qualified version string.""" + """Returns a fully-qualified version string.""" return "projects/{project}/models/{model}/versions/{version}".format( project=project, model=model, version=version, ) @staticmethod def parse_version_path(path: str) -> Dict[str, str]: - """Parse a version path into its component segments.""" + """Parses a version path into its component segments.""" m = re.match( r"^projects/(?P.+?)/models/(?P.+?)/versions/(?P.+?)$", path, @@ -271,7 +273,7 @@ def parse_version_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -284,7 +286,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -295,7 +297,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -306,7 +308,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -317,7 +319,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -336,7 +338,7 @@ def __init__( client_options: 
Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the migration service client. + """Instantiates the migration service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -391,9 +393,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -405,12 +408,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -425,8 +430,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -452,7 +457,7 @@ def search_migratable_resources( ) -> pagers.SearchMigratableResourcesPager: r"""Searches all of the resources in automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to AI Platform's + ml.googleapis.com that can be migrated to Vertex AI's given location. Args: @@ -461,7 +466,7 @@ def search_migratable_resources( [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. parent (str): Required. The location that the migratable resources - should be searched from. It's the AI Platform location + should be searched from. It's the Vertex AI location that the resources can be migrated to, not the resources' original location. Format: ``projects/{project}/locations/{location}`` @@ -543,7 +548,7 @@ def batch_migrate_resources( ) -> operation.Operation: r"""Batch migrates resources from ml.googleapis.com, automl.googleapis.com, and datalabeling.googleapis.com - to AI Platform (Unified). + to Vertex AI. Args: request (google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest): diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py index 0756f5b1c4..af6f4fa736 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py @@ -118,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. 
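# Editor's note: the refactor above replaces conditional expressions with
# explicit if/else blocks when choosing between the default and mTLS API
# endpoints. A standalone sketch of that decision logic follows; constants
# are illustrative, and the real clients raise MutualTLSChannelError rather
# than ValueError.
import os

DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = "aiplatform.mtls.googleapis.com"


def resolve_api_endpoint(is_mtls: bool) -> str:
    use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    if use_mtls_env == "never":
        return DEFAULT_ENDPOINT
    if use_mtls_env == "always":
        return DEFAULT_MTLS_ENDPOINT
    if use_mtls_env == "auto":
        # Use the mTLS endpoint only when a default client cert is available.
        if is_mtls:
            return DEFAULT_MTLS_ENDPOINT
        return DEFAULT_ENDPOINT
    raise ValueError(
        "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
        "values: never, auto, always"
    )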
Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py index 372e413534..eff1c46c85 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py @@ -34,8 +34,7 @@ class MigrationServiceGrpcTransport(MigrationServiceTransport): """gRPC backend transport for MigrationService. A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to AI - Platform. + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation @@ -251,7 +250,7 @@ def search_migratable_resources( Searches all of the resources in automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to AI Platform's + ml.googleapis.com that can be migrated to Vertex AI's given location. Returns: @@ -282,7 +281,7 @@ def batch_migrate_resources( Batch migrates resources from ml.googleapis.com, automl.googleapis.com, and datalabeling.googleapis.com - to AI Platform (Unified). + to Vertex AI. Returns: Callable[[~.BatchMigrateResourcesRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py index e98ea4d789..79c5fdcee2 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py @@ -36,8 +36,7 @@ class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport): """gRPC AsyncIO backend transport for MigrationService. A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to AI - Platform. + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation @@ -256,7 +255,7 @@ def search_migratable_resources( Searches all of the resources in automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to AI Platform's + ml.googleapis.com that can be migrated to Vertex AI's given location. Returns: @@ -288,7 +287,7 @@ def batch_migrate_resources( Batch migrates resources from ml.googleapis.com, automl.googleapis.com, and datalabeling.googleapis.com - to AI Platform (Unified). + to Vertex AI. 
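# Editor's note: hypothetical use of the renamed surface ("Vertex AI"):
# listing resources eligible for migration into a Vertex AI location.
# Project and region values are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.MigrationServiceClient()
parent = client.common_location_path("my-project", "us-central1")

# The call returns a pager; iterating it transparently fetches further pages.
for resource in client.search_migratable_resources(parent=parent):
    print(resource)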
Returns: Callable[[~.BatchMigrateResourcesRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index d4b8081ed8..c598f10a7c 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -48,7 +48,7 @@ class ModelServiceAsyncClient: - """A service for managing AI Platform's machine learning Models.""" + """A service for managing Vertex AI's machine learning Models.""" _client: ModelServiceClient @@ -96,7 +96,8 @@ class ModelServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -111,7 +112,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -128,7 +129,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> ModelServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: ModelServiceTransport: The transport used by the client instance. @@ -147,7 +148,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the model service client. + """Instantiates the model service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -196,7 +197,7 @@ async def upload_model( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Uploads a Model artifact into AI Platform. + r"""Uploads a Model artifact into Vertex AI. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.UploadModelRequest`): @@ -455,7 +456,7 @@ async def update_model( update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py index 413426bcba..c8474b7e64 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -64,7 +64,7 @@ class ModelServiceClientMeta(type): _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[ModelServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. 
If none is @@ -83,11 +83,12 @@ def get_transport_class(cls, label: str = None,) -> Type[ModelServiceTransport]: class ModelServiceClient(metaclass=ModelServiceClientMeta): - """A service for managing AI Platform's machine learning Models.""" + """A service for managing Vertex AI's machine learning Models.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -121,7 +122,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -138,7 +140,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -157,23 +159,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> ModelServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - ModelServiceTransport: The transport used by the client instance. + ModelServiceTransport: The transport used by the client + instance. """ return self._transport @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: - """Return a fully-qualified endpoint string.""" + """Returns a fully-qualified endpoint string.""" return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) @staticmethod def parse_endpoint_path(path: str) -> Dict[str, str]: - """Parse a endpoint path into its component segments.""" + """Parses a endpoint path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path, @@ -182,14 +185,14 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path, @@ -200,14 +203,14 @@ def parse_model_path(path: str) -> Dict[str, str]: def model_evaluation_path( project: str, location: str, model: str, evaluation: str, ) -> str: - """Return a fully-qualified model_evaluation string.""" + """Returns a fully-qualified model_evaluation string.""" return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format( project=project, location=location, model=model, evaluation=evaluation, ) @staticmethod def parse_model_evaluation_path(path: str) -> Dict[str, str]: - """Parse a model_evaluation path into its component segments.""" + """Parses a 
model_evaluation path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)$", path, @@ -218,7 +221,7 @@ def parse_model_evaluation_path(path: str) -> Dict[str, str]: def model_evaluation_slice_path( project: str, location: str, model: str, evaluation: str, slice: str, ) -> str: - """Return a fully-qualified model_evaluation_slice string.""" + """Returns a fully-qualified model_evaluation_slice string.""" return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format( project=project, location=location, @@ -229,7 +232,7 @@ def model_evaluation_slice_path( @staticmethod def parse_model_evaluation_slice_path(path: str) -> Dict[str, str]: - """Parse a model_evaluation_slice path into its component segments.""" + """Parses a model_evaluation_slice path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)/slices/(?P.+?)$", path, @@ -240,14 +243,14 @@ def parse_model_evaluation_slice_path(path: str) -> Dict[str, str]: def training_pipeline_path( project: str, location: str, training_pipeline: str, ) -> str: - """Return a fully-qualified training_pipeline string.""" + """Returns a fully-qualified training_pipeline string.""" return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format( project=project, location=location, training_pipeline=training_pipeline, ) @staticmethod def parse_training_pipeline_path(path: str) -> Dict[str, str]: - """Parse a training_pipeline path into its component segments.""" + """Parses a training_pipeline path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path, @@ -256,7 +259,7 @@ def parse_training_pipeline_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -269,7 +272,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -280,7 +283,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -291,7 +294,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -302,7 +305,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -321,7 +324,7 @@ def __init__( client_options: 
Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the model service client. + """Instantiates the model service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -376,9 +379,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -390,12 +394,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -410,8 +416,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -436,7 +442,7 @@ def upload_model( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gac_operation.Operation: - r"""Uploads a Model artifact into AI Platform. + r"""Uploads a Model artifact into Vertex AI. Args: request (google.cloud.aiplatform_v1beta1.types.UploadModelRequest): @@ -695,7 +701,7 @@ def update_model( update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py index ea9f49f5ab..c1c5e47d21 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py @@ -118,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -246,7 +246,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -376,7 +376,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. 
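# Editor's note: a hedged sketch of upload_model, which the hunks above
# re-document as uploading "into Vertex AI". All resource names and the
# serving image are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()
model = aiplatform_v1beta1.Model(
    display_name="my-model",
    container_spec=aiplatform_v1beta1.ModelContainerSpec(
        image_uri="gcr.io/my-project/my-serving-image",
    ),
)
# upload_model returns a long-running operation; result() blocks until the
# Model resource exists.
operation = client.upload_model(
    parent="projects/my-project/locations/us-central1",
    model=model,
)
response = operation.result()
print(response.model)  # fully-qualified name of the uploaded Model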
Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py index 1fe74c535e..3bd0d0ec59 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -37,7 +37,7 @@ class ModelServiceGrpcTransport(ModelServiceTransport): """gRPC backend transport for ModelService. - A service for managing AI Platform's machine learning Models. + A service for managing Vertex AI's machine learning Models. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation @@ -248,7 +248,7 @@ def upload_model( ) -> Callable[[model_service.UploadModelRequest], operations_pb2.Operation]: r"""Return a callable for the upload model method over gRPC. - Uploads a Model artifact into AI Platform. + Uploads a Model artifact into Vertex AI. Returns: Callable[[~.UploadModelRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py index 3e56398431..9f0a0a7230 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -39,7 +39,7 @@ class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): """gRPC AsyncIO backend transport for ModelService. - A service for managing AI Platform's machine learning Models. + A service for managing Vertex AI's machine learning Models. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation @@ -255,7 +255,7 @@ def upload_model( ]: r"""Return a callable for the upload model method over gRPC. - Uploads a Model artifact into AI Platform. + Uploads a Model artifact into Vertex AI. Returns: Callable[[~.UploadModelRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index 9c368150ab..b6d3e9cc2c 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -50,7 +50,11 @@ class PipelineServiceAsyncClient: - """A service for creating and managing AI Platform's pipelines.""" + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). + """ _client: PipelineServiceClient @@ -106,7 +110,8 @@ class PipelineServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -121,7 +126,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. 
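# Editor's note: the pager constructors retouched above ("Instantiates the
# pager") back the generated list RPCs. A sketch of both iteration styles;
# the parent resource name is a placeholder.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.ModelServiceClient()
parent = "projects/my-project/locations/us-central1"

# Item-at-a-time: the pager fetches new pages on demand.
for model in client.list_models(parent=parent):
    print(model.display_name)

# Page-at-a-time, when page boundaries matter:
pager = client.list_models(parent=parent)
for page in pager.pages:
    print(len(page.models))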
Args: filename (str): The path to the service account private key json @@ -138,7 +143,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> PipelineServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: PipelineServiceTransport: The transport used by the client instance. @@ -157,7 +162,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the pipeline service client. + """Instantiates the pipeline service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -238,10 +243,10 @@ async def create_training_pipeline( google.cloud.aiplatform_v1beta1.types.TrainingPipeline: The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may - also export data from AI Platform's Dataset which + also export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to AI Platform, and evaluate the Model. + the Model to Vertex AI, and evaluate the Model. """ # Create or coerce a protobuf request object. @@ -316,10 +321,10 @@ async def get_training_pipeline( google.cloud.aiplatform_v1beta1.types.TrainingPipeline: The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may - also export data from AI Platform's Dataset which + also export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to AI Platform, and evaluate the Model. + the Model to Vertex AI, and evaluate the Model. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py index 106452a82b..ac589a4a24 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -68,7 +68,7 @@ class PipelineServiceClientMeta(type): _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[PipelineServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -87,11 +87,16 @@ def get_transport_class(cls, label: str = None,) -> Type[PipelineServiceTranspor class PipelineServiceClient(metaclass=PipelineServiceClientMeta): - """A service for creating and managing AI Platform's pipelines.""" + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). + """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
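# Editor's note: an illustrative read against the pipeline service, whose
# TrainingPipeline docstrings are updated above to say the pipeline may also
# export a Dataset and upload the trained Model to Vertex AI. Identifiers
# are placeholders; the path helper appears later in this file.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.PipelineServiceClient()
name = client.training_pipeline_path("my-project", "us-central1", "1234567890")
pipeline = client.get_training_pipeline(name=name)
print(pipeline.state)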
Args: @@ -125,7 +130,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -142,7 +148,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -161,10 +167,11 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> PipelineServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - PipelineServiceTransport: The transport used by the client instance. + PipelineServiceTransport: The transport used by the client + instance. """ return self._transport @@ -172,7 +179,7 @@ def transport(self) -> PipelineServiceTransport: def artifact_path( project: str, location: str, metadata_store: str, artifact: str, ) -> str: - """Return a fully-qualified artifact string.""" + """Returns a fully-qualified artifact string.""" return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( project=project, location=location, @@ -182,7 +189,7 @@ def artifact_path( @staticmethod def parse_artifact_path(path: str) -> Dict[str, str]: - """Parse a artifact path into its component segments.""" + """Parses a artifact path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path, @@ -193,7 +200,7 @@ def parse_artifact_path(path: str) -> Dict[str, str]: def context_path( project: str, location: str, metadata_store: str, context: str, ) -> str: - """Return a fully-qualified context string.""" + """Returns a fully-qualified context string.""" return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( project=project, location=location, @@ -203,7 +210,7 @@ def context_path( @staticmethod def parse_context_path(path: str) -> Dict[str, str]: - """Parse a context path into its component segments.""" + """Parses a context path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path, @@ -212,14 +219,14 @@ def parse_context_path(path: str) -> Dict[str, str]: @staticmethod def custom_job_path(project: str, location: str, custom_job: str,) -> str: - """Return a fully-qualified custom_job string.""" + """Returns a fully-qualified custom_job string.""" return "projects/{project}/locations/{location}/customJobs/{custom_job}".format( project=project, location=location, custom_job=custom_job, ) @staticmethod def parse_custom_job_path(path: str) -> Dict[str, str]: - """Parse a custom_job path into its component segments.""" + """Parses a custom_job path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path, @@ -228,14 +235,14 @@ def parse_custom_job_path(path: str) -> Dict[str, str]: @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: - """Return a fully-qualified endpoint string.""" + """Returns a fully-qualified endpoint string.""" return 
"projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) @staticmethod def parse_endpoint_path(path: str) -> Dict[str, str]: - """Parse a endpoint path into its component segments.""" + """Parses a endpoint path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path, @@ -246,7 +253,7 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: def execution_path( project: str, location: str, metadata_store: str, execution: str, ) -> str: - """Return a fully-qualified execution string.""" + """Returns a fully-qualified execution string.""" return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( project=project, location=location, @@ -256,7 +263,7 @@ def execution_path( @staticmethod def parse_execution_path(path: str) -> Dict[str, str]: - """Parse a execution path into its component segments.""" + """Parses a execution path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path, @@ -265,14 +272,14 @@ def parse_execution_path(path: str) -> Dict[str, str]: @staticmethod def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" + """Returns a fully-qualified model string.""" return "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @staticmethod def parse_model_path(path: str) -> Dict[str, str]: - """Parse a model path into its component segments.""" + """Parses a model path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path, @@ -281,14 +288,14 @@ def parse_model_path(path: str) -> Dict[str, str]: @staticmethod def network_path(project: str, network: str,) -> str: - """Return a fully-qualified network string.""" + """Returns a fully-qualified network string.""" return "projects/{project}/global/networks/{network}".format( project=project, network=network, ) @staticmethod def parse_network_path(path: str) -> Dict[str, str]: - """Parse a network path into its component segments.""" + """Parses a network path into its component segments.""" m = re.match( r"^projects/(?P.+?)/global/networks/(?P.+?)$", path ) @@ -296,14 +303,14 @@ def parse_network_path(path: str) -> Dict[str, str]: @staticmethod def pipeline_job_path(project: str, location: str, pipeline_job: str,) -> str: - """Return a fully-qualified pipeline_job string.""" + """Returns a fully-qualified pipeline_job string.""" return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format( project=project, location=location, pipeline_job=pipeline_job, ) @staticmethod def parse_pipeline_job_path(path: str) -> Dict[str, str]: - """Parse a pipeline_job path into its component segments.""" + """Parses a pipeline_job path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/pipelineJobs/(?P.+?)$", path, @@ -314,14 +321,14 @@ def parse_pipeline_job_path(path: str) -> Dict[str, str]: def training_pipeline_path( project: str, location: str, training_pipeline: str, ) -> str: - """Return a fully-qualified training_pipeline string.""" + """Returns a fully-qualified training_pipeline string.""" return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format( project=project, location=location, training_pipeline=training_pipeline, ) @staticmethod def 
parse_training_pipeline_path(path: str) -> Dict[str, str]: - """Parse a training_pipeline path into its component segments.""" + """Parses a training_pipeline path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path, @@ -330,7 +337,7 @@ def parse_training_pipeline_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -343,7 +350,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -354,7 +361,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -365,7 +372,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -376,7 +383,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -395,7 +402,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the pipeline service client. + """Instantiates the pipeline service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -450,9 +457,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -464,12 +472,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -484,8 +494,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." 
+ "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -542,10 +552,10 @@ def create_training_pipeline( google.cloud.aiplatform_v1beta1.types.TrainingPipeline: The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may - also export data from AI Platform's Dataset which + also export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to AI Platform, and evaluate the Model. + the Model to Vertex AI, and evaluate the Model. """ # Create or coerce a protobuf request object. @@ -620,10 +630,10 @@ def get_training_pipeline( google.cloud.aiplatform_v1beta1.types.TrainingPipeline: The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may - also export data from AI Platform's Dataset which + also export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to AI Platform, and evaluate the Model. + the Model to Vertex AI, and evaluate the Model. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py index c56f01985c..c8b8f5bf96 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py @@ -119,7 +119,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -249,7 +249,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py index 029bd62656..6698843a9a 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -40,7 +40,10 @@ class PipelineServiceGrpcTransport(PipelineServiceTransport): """gRPC backend transport for PipelineService. - A service for creating and managing AI Platform's pipelines. + A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py index c12da6ea8c..7e35c41b62 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py @@ -42,7 +42,10 @@ class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): """gRPC AsyncIO backend transport for PipelineService. - A service for creating and managing AI Platform's pipelines. 
+ A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index 9872aa3fb3..0ea7d71bac 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -71,7 +71,8 @@ class PredictionServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -86,7 +87,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -103,7 +104,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> PredictionServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: PredictionServiceTransport: The transport used by the client instance. @@ -122,7 +123,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the prediction service client. + """Instantiates the prediction service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index c7852e2805..29b849b09c 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -55,7 +55,7 @@ class PredictionServiceClientMeta(type): def get_transport_class( cls, label: str = None, ) -> Type[PredictionServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -78,7 +78,8 @@ class PredictionServiceClient(metaclass=PredictionServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -112,7 +113,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. 
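# Editor's note: the "Converts api endpoint to mTLS endpoint" helper edited
# above rewrites googleapis.com hostnames. An approximate standalone
# re-implementation for reference; treat the regex as illustrative.
import re


def to_mtls_endpoint(api_endpoint: str) -> str:
    if not api_endpoint:
        return api_endpoint
    m = re.match(
        r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?"
        r"(?P<googledomain>\.googleapis\.com)?",
        api_endpoint,
    )
    if m is None:
        return api_endpoint
    if m.group("mtls") or not m.group("googledomain"):
        return api_endpoint  # already mTLS, or not a googleapis.com host
    if m.group("sandbox"):
        return api_endpoint.replace(
            "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
        )
    return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")


assert to_mtls_endpoint("aiplatform.googleapis.com") == "aiplatform.mtls.googleapis.com"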
@@ -129,7 +131,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -148,23 +150,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> PredictionServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - PredictionServiceTransport: The transport used by the client instance. + PredictionServiceTransport: The transport used by the client + instance. """ return self._transport @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: - """Return a fully-qualified endpoint string.""" + """Returns a fully-qualified endpoint string.""" return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) @staticmethod def parse_endpoint_path(path: str) -> Dict[str, str]: - """Parse a endpoint path into its component segments.""" + """Parses a endpoint path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path, @@ -173,7 +176,7 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -186,7 +189,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -197,7 +200,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -208,7 +211,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -219,7 +222,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -238,7 +241,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the prediction service client. + """Instantiates the prediction service client. 
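# Editor's note: hypothetical online prediction call against the prediction
# service client touched above. The endpoint ID and payload are placeholders;
# instances are protobuf Value objects.
from google.cloud import aiplatform_v1beta1
from google.protobuf import json_format, struct_pb2

client = aiplatform_v1beta1.PredictionServiceClient()
endpoint = client.endpoint_path("my-project", "us-central1", "1234567890")

instance = json_format.ParseDict({"values": [1.0, 2.0]}, struct_pb2.Value())
response = client.predict(endpoint=endpoint, instances=[instance])
print(response.predictions)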
Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -293,9 +296,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -307,12 +311,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -327,8 +333,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py index 9bbde22def..277938e3cd 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -89,7 +89,8 @@ class SpecialistPoolServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -104,7 +105,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -121,7 +122,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> SpecialistPoolServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: SpecialistPoolServiceTransport: The transport used by the client instance. @@ -141,7 +142,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the specialist pool service client. + """Instantiates the specialist pool service client. 
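# Editor's note: the constructors reworded above ("Instantiates the ...
# client") accept ClientOptions. A sketch of pinning a specific API endpoint;
# the regional hostname shown is an assumed example.
from google.api_core.client_options import ClientOptions
from google.cloud import aiplatform_v1beta1

options = ClientOptions(api_endpoint="us-central1-aiplatform.googleapis.com")
client = aiplatform_v1beta1.SpecialistPoolServiceClient(client_options=options)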
Args: credentials (Optional[google.auth.credentials.Credentials]): The diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py index 8b5a23c39f..c26bb7b712 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py @@ -61,7 +61,7 @@ class SpecialistPoolServiceClientMeta(type): def get_transport_class( cls, label: str = None, ) -> Type[SpecialistPoolServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -90,7 +90,8 @@ class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -124,7 +125,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -141,7 +143,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -160,23 +162,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> SpecialistPoolServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - SpecialistPoolServiceTransport: The transport used by the client instance. + SpecialistPoolServiceTransport: The transport used by the client + instance. 
""" return self._transport @staticmethod def specialist_pool_path(project: str, location: str, specialist_pool: str,) -> str: - """Return a fully-qualified specialist_pool string.""" + """Returns a fully-qualified specialist_pool string.""" return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format( project=project, location=location, specialist_pool=specialist_pool, ) @staticmethod def parse_specialist_pool_path(path: str) -> Dict[str, str]: - """Parse a specialist_pool path into its component segments.""" + """Parses a specialist_pool path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/specialistPools/(?P.+?)$", path, @@ -185,7 +188,7 @@ def parse_specialist_pool_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -198,7 +201,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -209,7 +212,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -220,7 +223,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -231,7 +234,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -250,7 +253,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the specialist pool service client. + """Instantiates the specialist pool service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -305,9 +308,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. 
if client_options.api_endpoint is not None: @@ -319,12 +323,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -339,8 +345,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py index cba513a4b9..0420d53e9e 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py @@ -118,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py index 347535c785..692a769805 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -108,7 +108,8 @@ class TensorboardServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -123,7 +124,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -140,7 +141,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> TensorboardServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: TensorboardServiceTransport: The transport used by the client instance. @@ -160,7 +161,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the tensorboard service client. + """Instantiates the tensorboard service client. 
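The rewritten branches above amount to a small decision table keyed on `GOOGLE_API_USE_MTLS_ENDPOINT`. A standalone sketch of that table, not the library's own code; the endpoint constants are assumed values for illustration:

```python
import os

DEFAULT_ENDPOINT = "aiplatform.googleapis.com"            # assumed
DEFAULT_MTLS_ENDPOINT = "aiplatform.mtls.googleapis.com"  # assumed

def pick_api_endpoint(has_client_cert: bool) -> str:
    # Mirrors the never/always/auto handling shown in the diff;
    # the variable defaults to "auto" when unset.
    use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    if use_mtls_env == "never":
        return DEFAULT_ENDPOINT
    if use_mtls_env == "always":
        return DEFAULT_MTLS_ENDPOINT
    if use_mtls_env == "auto":
        return DEFAULT_MTLS_ENDPOINT if has_client_cert else DEFAULT_ENDPOINT
    # The library raises MutualTLSChannelError at this point.
    raise ValueError(
        "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
        "values: never, auto, always"
    )
```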
Args: credentials (Optional[google.auth.credentials.Credentials]): The diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py index 4899e0e60c..045c69b7d0 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py @@ -74,7 +74,7 @@ class TensorboardServiceClientMeta(type): def get_transport_class( cls, label: str = None, ) -> Type[TensorboardServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -97,7 +97,8 @@ class TensorboardServiceClient(metaclass=TensorboardServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -131,7 +132,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -148,7 +150,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -167,23 +169,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> TensorboardServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - TensorboardServiceTransport: The transport used by the client instance. + TensorboardServiceTransport: The transport used by the client + instance. 
""" return self._transport @staticmethod def tensorboard_path(project: str, location: str, tensorboard: str,) -> str: - """Return a fully-qualified tensorboard string.""" + """Returns a fully-qualified tensorboard string.""" return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format( project=project, location=location, tensorboard=tensorboard, ) @staticmethod def parse_tensorboard_path(path: str) -> Dict[str, str]: - """Parse a tensorboard path into its component segments.""" + """Parses a tensorboard path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path, @@ -194,7 +197,7 @@ def parse_tensorboard_path(path: str) -> Dict[str, str]: def tensorboard_experiment_path( project: str, location: str, tensorboard: str, experiment: str, ) -> str: - """Return a fully-qualified tensorboard_experiment string.""" + """Returns a fully-qualified tensorboard_experiment string.""" return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format( project=project, location=location, @@ -204,7 +207,7 @@ def tensorboard_experiment_path( @staticmethod def parse_tensorboard_experiment_path(path: str) -> Dict[str, str]: - """Parse a tensorboard_experiment path into its component segments.""" + """Parses a tensorboard_experiment path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)$", path, @@ -215,7 +218,7 @@ def parse_tensorboard_experiment_path(path: str) -> Dict[str, str]: def tensorboard_run_path( project: str, location: str, tensorboard: str, experiment: str, run: str, ) -> str: - """Return a fully-qualified tensorboard_run string.""" + """Returns a fully-qualified tensorboard_run string.""" return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format( project=project, location=location, @@ -226,7 +229,7 @@ def tensorboard_run_path( @staticmethod def parse_tensorboard_run_path(path: str) -> Dict[str, str]: - """Parse a tensorboard_run path into its component segments.""" + """Parses a tensorboard_run path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)$", path, @@ -242,7 +245,7 @@ def tensorboard_time_series_path( run: str, time_series: str, ) -> str: - """Return a fully-qualified tensorboard_time_series string.""" + """Returns a fully-qualified tensorboard_time_series string.""" return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format( project=project, location=location, @@ -254,7 +257,7 @@ def tensorboard_time_series_path( @staticmethod def parse_tensorboard_time_series_path(path: str) -> Dict[str, str]: - """Parse a tensorboard_time_series path into its component segments.""" + """Parses a tensorboard_time_series path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)/timeSeries/(?P.+?)$", path, @@ -263,7 +266,7 @@ def parse_tensorboard_time_series_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -276,7 +279,7 @@ def 
parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -287,7 +290,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -298,7 +301,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -309,7 +312,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -328,7 +331,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the tensorboard service client. + """Instantiates the tensorboard service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -383,9 +386,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -397,12 +401,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -417,8 +423,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py index 8200c9c237..2e3db9f8d8 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py @@ -120,7 +120,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. 
Args: method (Callable): The method that was originally called, and @@ -252,7 +252,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -384,7 +384,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -516,7 +516,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -654,7 +654,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py index d76f7aa64e..5ba05a0d89 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py @@ -39,7 +39,7 @@ class VizierServiceAsyncClient: - """Cloud AI Platform Vizier API. + """Vertex Vizier API. Vizier service is a GCP service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. @@ -83,7 +83,8 @@ class VizierServiceAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -98,7 +99,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -115,7 +116,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> VizierServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: VizierServiceTransport: The transport used by the client instance. @@ -134,7 +135,7 @@ def __init__( client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the vizier service client. + """Instantiates the vizier service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -550,9 +551,9 @@ async def suggest_trials( metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Adds one or more Trials to a Study, with parameter values - suggested by AI Platform Vizier. Returns a long-running - operation associated with the generation of Trial suggestions. - When this long-running operation succeeds, it will contain a + suggested by Vertex Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. 
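A hedged sketch of driving `suggest_trials` end to end with the async client. The study path, suggestion count, and client ID are placeholders, and the call assumes Application Default Credentials are configured:

```python
import asyncio

from google.cloud.aiplatform_v1beta1.services.vizier_service import (
    VizierServiceAsyncClient,
)

async def main():
    client = VizierServiceAsyncClient()
    # suggest_trials returns a long-running operation; the study path
    # below is hypothetical and would normally come from an earlier
    # create_study call.
    operation = await client.suggest_trials(
        request={
            "parent": "projects/my-project/locations/us-central1/studies/123",
            "suggestion_count": 2,
            "client_id": "worker-0",
        }
    )
    response = await operation.result()  # SuggestTrialsResponse
    for trial in response.trials:
        print(trial.name, trial.state)

# asyncio.run(main())
```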
Args: diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py index c6f834add7..4b3611e6c4 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py @@ -55,7 +55,7 @@ class VizierServiceClientMeta(type): _transport_registry["grpc_asyncio"] = VizierServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[VizierServiceTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -74,7 +74,7 @@ def get_transport_class(cls, label: str = None,) -> Type[VizierServiceTransport] class VizierServiceClient(metaclass=VizierServiceClientMeta): - """Cloud AI Platform Vizier API. + """Vertex Vizier API. Vizier service is a GCP service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. @@ -82,7 +82,8 @@ class VizierServiceClient(metaclass=VizierServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -116,7 +117,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -133,7 +135,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -152,23 +154,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> VizierServiceTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - VizierServiceTransport: The transport used by the client instance. + VizierServiceTransport: The transport used by the client + instance. 
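The `transport` property documented above only exposes what the client is already using, but a transport can also be injected; scopes and credentials then belong on the transport itself, which is what the `ValueError` elsewhere in this diff enforces. A sketch assuming Application Default Credentials are available:

```python
from google.cloud.aiplatform_v1beta1.services.vizier_service import (
    VizierServiceClient,
)
from google.cloud.aiplatform_v1beta1.services.vizier_service.transports.grpc import (
    VizierServiceGrpcTransport,
)

# Let the client build its default transport, then inspect it.
client = VizierServiceClient()
print(type(client.transport).__name__)  # VizierServiceGrpcTransport

# Or build the transport explicitly and hand it to the client;
# credentials and scopes must be configured on the transport here.
transport = VizierServiceGrpcTransport(host="aiplatform.googleapis.com")
client = VizierServiceClient(transport=transport)
```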
""" return self._transport @staticmethod def custom_job_path(project: str, location: str, custom_job: str,) -> str: - """Return a fully-qualified custom_job string.""" + """Returns a fully-qualified custom_job string.""" return "projects/{project}/locations/{location}/customJobs/{custom_job}".format( project=project, location=location, custom_job=custom_job, ) @staticmethod def parse_custom_job_path(path: str) -> Dict[str, str]: - """Parse a custom_job path into its component segments.""" + """Parses a custom_job path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path, @@ -177,14 +180,14 @@ def parse_custom_job_path(path: str) -> Dict[str, str]: @staticmethod def study_path(project: str, location: str, study: str,) -> str: - """Return a fully-qualified study string.""" + """Returns a fully-qualified study string.""" return "projects/{project}/locations/{location}/studies/{study}".format( project=project, location=location, study=study, ) @staticmethod def parse_study_path(path: str) -> Dict[str, str]: - """Parse a study path into its component segments.""" + """Parses a study path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)$", path, @@ -193,14 +196,14 @@ def parse_study_path(path: str) -> Dict[str, str]: @staticmethod def trial_path(project: str, location: str, study: str, trial: str,) -> str: - """Return a fully-qualified trial string.""" + """Returns a fully-qualified trial string.""" return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format( project=project, location=location, study=study, trial=trial, ) @staticmethod def parse_trial_path(path: str) -> Dict[str, str]: - """Parse a trial path into its component segments.""" + """Parses a trial path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path, @@ -209,7 +212,7 @@ def parse_trial_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -222,7 +225,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -233,7 +236,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -244,7 +247,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -255,7 +258,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return 
"projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -274,7 +277,7 @@ def __init__( client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the vizier service client. + """Instantiates the vizier service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -329,9 +332,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -343,12 +347,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -363,8 +369,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -756,9 +762,9 @@ def suggest_trials( metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Adds one or more Trials to a Study, with parameter values - suggested by AI Platform Vizier. Returns a long-running - operation associated with the generation of Trial suggestions. - When this long-running operation succeeds, it will contain a + suggested by Vertex Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. Args: diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py index 39d956e6be..148469ef67 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py @@ -116,7 +116,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -244,7 +244,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. 
Args: method (Callable): The method that was originally called, and diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py index 48aac6a6c1..b896fcfd5e 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py @@ -36,7 +36,7 @@ class VizierServiceGrpcTransport(VizierServiceTransport): """gRPC backend transport for VizierService. - Cloud AI Platform Vizier API. + Vertex Vizier API. Vizier service is a GCP service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. @@ -384,9 +384,9 @@ def suggest_trials( r"""Return a callable for the suggest trials method over gRPC. Adds one or more Trials to a Study, with parameter values - suggested by AI Platform Vizier. Returns a long-running - operation associated with the generation of Trial suggestions. - When this long-running operation succeeds, it will contain a + suggested by Vertex Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. Returns: diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py index f26ef1a02c..5e3e810e0c 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py @@ -38,7 +38,7 @@ class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): """gRPC AsyncIO backend transport for VizierService. - Cloud AI Platform Vizier API. + Vertex Vizier API. Vizier service is a GCP service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. @@ -394,9 +394,9 @@ def suggest_trials( r"""Return a callable for the suggest trials method over gRPC. Adds one or more Trials to a Study, with parameter values - suggested by AI Platform Vizier. Returns a long-running - operation associated with the generation of Trial suggestions. - When this long-running operation succeeds, it will contain a + suggested by Vertex Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. Returns: diff --git a/google/cloud/aiplatform_v1beta1/types/annotation.py b/google/cloud/aiplatform_v1beta1/types/annotation.py index 07c5769df3..4a89bb5682 100644 --- a/google/cloud/aiplatform_v1beta1/types/annotation.py +++ b/google/cloud/aiplatform_v1beta1/types/annotation.py @@ -37,8 +37,8 @@ class Annotation(proto.Message): describing [payload][google.cloud.aiplatform.v1beta1.Annotation.payload]. The schema is defined as an `OpenAPI 3.0.2 Schema - Object `__. The schema files - that can be used here are found in + Object `__. 
+ The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's diff --git a/google/cloud/aiplatform_v1beta1/types/artifact.py b/google/cloud/aiplatform_v1beta1/types/artifact.py index 88914edc58..1cf5c15350 100644 --- a/google/cloud/aiplatform_v1beta1/types/artifact.py +++ b/google/cloud/aiplatform_v1beta1/types/artifact.py @@ -61,9 +61,9 @@ class Artifact(proto.Message): The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. This property is - managed by clients (such as AI Platform - Pipelines), and the system does not prescribe or - check the validity of state transitions. + managed by clients (such as Vertex Pipelines), + and the system does not prescribe or check the + validity of state transitions. schema_title (str): The title of the schema describing the metadata. diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index 1e0d364c4f..cc0d88d68b 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -91,8 +91,7 @@ class BatchPredictionJob(proto.Message): Immutable. Parameters configuring the batch behavior. Currently only applicable when [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources] - are used (in other cases AI Platform does the tuning - itself). + are used (in other cases Vertex AI does the tuning itself). generate_explanation (bool): Generate explanation with the batch prediction results. @@ -282,7 +281,7 @@ class OutputConfig(proto.Message): ```google.rpc.Status`` `__ represented as a STRUCT, and containing only ``code`` and ``message``. predictions_format (str): - Required. The format in which AI Platform gives the + Required. The format in which Vertex AI gives the predictions, must be one of the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index 3900a6bd26..9932a3c9ac 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -114,11 +114,12 @@ class CustomJobSpec(proto.Message): scheduling (google.cloud.aiplatform_v1beta1.types.Scheduling): Scheduling options for a CustomJob. service_account (str): - Specifies the service account for workload - run-as account. Users submitting jobs must have - act-as permission on this run-as account. If - unspecified, the AI Platform Custom Code Service - Agent for the CustomJob's project is used. + Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this + run-as account. If unspecified, the `AI Platform Custom Code + Service + Agent `__ + for the CustomJob's project is used. network (str): The full name of the Compute Engine `network `__ @@ -141,9 +142,8 @@ class CustomJobSpec(proto.Message): name [id][google.cloud.aiplatform.v1beta1.Trial.id] under its parent HyperparameterTuningJob's baseOutputDirectory. 
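Pulling the `CustomJobSpec` fields described above together, a hedged sketch; every resource name, image URI, and bucket below is a placeholder rather than a working value:

```python
from google.cloud.aiplatform_v1beta1 import types

job_spec = types.CustomJobSpec(
    worker_pool_specs=[
        types.WorkerPoolSpec(
            machine_spec=types.MachineSpec(machine_type="n1-standard-4"),
            replica_count=1,
            container_spec=types.ContainerSpec(
                image_uri="gcr.io/my-project/trainer:latest"
            ),
        )
    ],
    # Run-as account; submitters need act-as permission on it.
    service_account="trainer@my-project.iam.gserviceaccount.com",
    # Storage root under which the AIP_* output directories are derived.
    base_output_directory=types.GcsDestination(
        output_uri_prefix="gs://my-bucket/output"
    ),
)
```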
- The following AI Platform environment variables will be - passed to containers or python modules when this field is - set: + The following Vertex AI environment variables will be passed + to containers or python modules when this field is set: For CustomJob: @@ -162,7 +162,7 @@ class CustomJobSpec(proto.Message): - AIP_TENSORBOARD_LOG_DIR = ``//logs/`` tensorboard (str): - Optional. The name of an AI Platform + Optional. The name of a Vertex AI [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] resource to which this CustomJob will upload Tensorboard logs. Format: @@ -239,11 +239,11 @@ class PythonPackageSpec(proto.Message): Attributes: executor_image_uri (str): Required. The URI of a container image in Artifact Registry - that will run the provided Python package. AI Platform + that will run the provided Python package. Vertex AI provides a wide range of executor images with pre-installed packages to meet users' various use cases. See the list of `pre-built containers for - training `__. + training `__. You must use an image from this list. package_uris (Sequence[str]): Required. The Google Cloud Storage location diff --git a/google/cloud/aiplatform_v1beta1/types/dataset.py b/google/cloud/aiplatform_v1beta1/types/dataset.py index 1d659c3bd0..b43279a2a8 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -126,7 +126,7 @@ class ImportDataConfig(proto.Message): Storage describing the import format. Validation will be done against the schema. The schema is defined as an `OpenAPI 3.0.2 Schema - Object `__. + Object `__. """ gcs_source = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/dataset_service.py b/google/cloud/aiplatform_v1beta1/types/dataset_service.py index b1458c7a78..09317f42a1 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset_service.py @@ -104,7 +104,7 @@ class UpdateDatasetRequest(proto.Message): update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - ``display_name`` diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py index 943c05c6e8..69f0282c34 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -169,7 +169,7 @@ class UpdateEndpointRequest(proto.Message): resource on the server. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. See - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. """ endpoint = proto.Field(proto.MESSAGE, number=1, message=gca_endpoint.Endpoint,) diff --git a/google/cloud/aiplatform_v1beta1/types/execution.py b/google/cloud/aiplatform_v1beta1/types/execution.py index a564e1122b..a2762f1a1c 100644 --- a/google/cloud/aiplatform_v1beta1/types/execution.py +++ b/google/cloud/aiplatform_v1beta1/types/execution.py @@ -37,9 +37,9 @@ class Execution(proto.Message): The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. This property is - managed by clients (such as AI Platform - Pipelines) and the system does not prescribe or - check the validity of state transitions. 
+ managed by clients (such as Vertex Pipelines) + and the system does not prescribe or check the + validity of state transitions. etag (str): An eTag used to perform consistent read-modify-write updates. If not set, a blind @@ -69,7 +69,7 @@ class Execution(proto.Message): identify schemas within the local metadata store. schema_version (str): - The version of the schema in schema_name to use. + The version of the schema in ``schema_title`` to use. Schema title and version is expected to be registered in earlier Create Schema calls. And both are used together as diff --git a/google/cloud/aiplatform_v1beta1/types/explanation.py b/google/cloud/aiplatform_v1beta1/types/explanation.py index d1c94d1d83..8091a4b1d1 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation.py @@ -219,7 +219,7 @@ class Attribution(proto.Message): might reduce the error. See `this - introduction `__ + introduction `__ for more information. output_name (str): Output only. Name of the explain output. Specified as the @@ -418,8 +418,8 @@ class SmoothGradConfig(proto.Message): to all the features. Use this field when all features are normalized to have the same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features are normalized to - have 0-mean and 1-variance. For more details about - normalization: https://tinyurl.com/dgc-normalization. + have 0-mean and 1-variance. Learn more about + `normalization `__. For best results the recommended value is about 10% - 20% of the standard deviation of the input feature. Refer to diff --git a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py index 70f1a5339c..0c2a926618 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py @@ -36,10 +36,10 @@ class ExplanationMetadata(proto.Message): An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. - The baseline of the empty feature is chosen by AI Platform. + The baseline of the empty feature is chosen by Vertex AI. - For AI Platform provided Tensorflow images, the key can be - any friendly name of the feature. Once specified, + For Vertex AI-provided Tensorflow images, the key can be any + friendly name of the feature. Once specified, [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] are keyed by this key (if not grouped with another feature). @@ -48,7 +48,7 @@ class ExplanationMetadata(proto.Message): outputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.OutputsEntry]): Required. Map from output names to output metadata. - For AI Platform provided Tensorflow images, keys + For Vertex AI-provided Tensorflow images, keys can be any user defined string that consists of any UTF-8 characters. For custom images, keys are the name of the @@ -60,9 +60,9 @@ class ExplanationMetadata(proto.Message): describing the format of the [feature attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML tabular Models always have this field populated by AI - Platform. Note: The URI given on output may be different, + Object `__. + AutoML tabular Models always have this field populated by + Vertex AI. 
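A sketch of how `input_baselines` and the `inputs`/`outputs` maps fit together; the feature and output names are invented, and the scalar baseline leans on the broadcast behavior described above:

```python
from google.protobuf import struct_pb2

from google.cloud.aiplatform_v1beta1 import types

# A scalar baseline; for Vertex AI-provided Tensorflow images it is
# broadcast to the shape of the input tensor.
baseline = struct_pb2.Value(number_value=0.0)

meta = types.ExplanationMetadata(
    inputs={
        "age": types.ExplanationMetadata.InputMetadata(
            input_baselines=[baseline]
        )
    },
    outputs={"scores": types.ExplanationMetadata.OutputMetadata()},
)
```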
Note: The URI given on output may be different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. @@ -73,22 +73,22 @@ class InputMetadata(proto.Message): Fields other than [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] - are applicable only for Models that are using AI Platform-provided + are applicable only for Models that are using Vertex AI-provided images for Tensorflow. Attributes: input_baselines (Sequence[google.protobuf.struct_pb2.Value]): Baseline inputs for this feature. - If no baseline is specified, AI Platform chooses the - baseline for this feature. If multiple baselines are - specified, AI Platform returns the average attributions - across them in [Attributions.baseline_attribution][]. + If no baseline is specified, Vertex AI chooses the baseline + for this feature. If multiple baselines are specified, + Vertex AI returns the average attributions across them in + [Attributions.baseline_attribution][]. - For AI Platform provided Tensorflow images (both 1.x and - 2.x), the shape of each baseline must match the shape of the - input tensor. If a scalar is provided, we broadcast to the - same shape as the input tensor. + For Vertex AI-provided Tensorflow images (both 1.x and 2.x), + the shape of each baseline must match the shape of the input + tensor. If a scalar is provided, we broadcast to the same + shape as the input tensor. For custom images, the element of the baselines must be in the same format as the feature's input in the @@ -100,7 +100,7 @@ class InputMetadata(proto.Message): [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. input_tensor_name (str): Name of the input tensor for this feature. - Required and is only applicable to AI Platform + Required and is only applicable to Vertex AI- provided images for Tensorflow. encoding (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Encoding): Defines how the feature is encoded into the @@ -141,8 +141,8 @@ class InputMetadata(proto.Message): A list of baselines for the encoded tensor. The shape of each baseline should match the shape of the encoded tensor. If a scalar is - provided, AI Platform broadcast to the same - shape as the encoded tensor. + provided, Vertex AI broadcasts to the same shape + as the encoded tensor. visualization (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization): Visualization configurations for image explanation. diff --git a/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py b/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py index b966a5edb9..932047aaca 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py @@ -32,8 +32,8 @@ class FeatureStatsAnomaly(proto.Message): Timestamp of the stats and anomalies always refers to end_time. Raw stats and anomalies are stored in stats_uri or anomaly_uri in the tensorflow defined protos. Field data_stats contains almost - identical information with the raw stats in AI Platform defined - proto, for UI to display. + identical information with the raw stats in Vertex AI defined proto, + for UI to display. 
Attributes: score (float): diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py index 747a588046..fcbe2516e6 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -127,7 +127,6 @@ class ListFeaturestoresRequest(proto.Message): Lists the featurestores that match the filter expression. The following fields are supported: - - ``display_name``: Supports =, != comparisons. - ``create_time``: Supports =, !=, <, >, <=, and >= comparisons. Values must be in RFC 3339 format. - ``update_time``: Supports =, !=, <, >, <=, and >= @@ -161,7 +160,6 @@ class ListFeaturestoresRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported Fields: - - ``display_name`` - ``create_time`` - ``update_time`` - ``online_serving_config.fixed_node_count`` @@ -222,10 +220,8 @@ class UpdateFeaturestoreRequest(proto.Message): Updatable fields: - - ``display_name`` - ``labels`` - ``online_serving_config.fixed_node_count`` - - ``retention_policy.online_storage_ttl_days`` """ featurestore = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py b/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py index 1344489d7d..da0f83747a 100644 --- a/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py +++ b/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py @@ -53,7 +53,7 @@ class HyperparameterTuningJob(proto.Message): max_failed_trial_count (int): The number of failed Trials that need to be seen before failing the HyperparameterTuningJob. - If set to 0, AI Platform decides how many Trials + If set to 0, Vertex AI decides how many Trials must fail before the whole job fails. trial_job_spec (google.cloud.aiplatform_v1beta1.types.CustomJobSpec): Required. The spec of a trial job. The same diff --git a/google/cloud/aiplatform_v1beta1/types/index.py b/google/cloud/aiplatform_v1beta1/types/index.py index 47341bbe06..289ef763b8 100644 --- a/google/cloud/aiplatform_v1beta1/types/index.py +++ b/google/cloud/aiplatform_v1beta1/types/index.py @@ -45,7 +45,7 @@ class Index(proto.Message): that is specific to it. Unset if the Index does not have any additional information. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. + Object `__. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py index ae7d3cfb6b..1f7a7be1b5 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py @@ -150,7 +150,7 @@ class DeployedIndex(proto.Message): DeployedIndex. automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources): Optional. A description of resources that the DeployedIndex - uses, which to large degree are decided by AI Platform, and + uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If min_replica_count is not set, the default value is 1. 
If max_replica_count is not set, the default value is diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py index 7703e02883..591071752a 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py @@ -173,7 +173,7 @@ class UpdateIndexEndpointRequest(proto.Message): the resource on the server. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. See - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. """ index_endpoint = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/index_service.py b/google/cloud/aiplatform_v1beta1/types/index_service.py index 73335e18e2..c6b5837295 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_service.py +++ b/google/cloud/aiplatform_v1beta1/types/index_service.py @@ -148,7 +148,7 @@ class UpdateIndexRequest(proto.Message): update_mask (google.protobuf.field_mask_pb2.FieldMask): The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. """ index = proto.Field(proto.MESSAGE, number=1, message=gca_index.Index,) diff --git a/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/google/cloud/aiplatform_v1beta1/types/machine_resources.py index abd28b68c4..329b22213b 100644 --- a/google/cloud/aiplatform_v1beta1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1beta1/types/machine_resources.py @@ -38,11 +38,13 @@ class MachineSpec(proto.Message): r"""Specification of a single machine. Attributes: machine_type (str): - Immutable. The type of the machine. For the machine types - supported for prediction, see - https://tinyurl.com/aip-docs/predictions/machine-types. For - machine types supported for creating a custom training job, - see https://tinyurl.com/aip-docs/training/configure-compute. + Immutable. The type of the machine. + + See the `list of machine types supported for + prediction `__ + + See the `list of machine types supported for custom + training `__. For [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] @@ -139,7 +141,7 @@ class DedicatedResources(proto.Message): class AutomaticResources(proto.Message): r"""A description of resources that to large degree are decided - by AI Platform, and require only a modest additional + by Vertex AI, and require only a modest additional configuration. Each Model supporting these resources documents its specific guidelines. @@ -164,7 +166,7 @@ class AutomaticResources(proto.Message): its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under - heavy traffic will be assume, though AI Platform + heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number. """ @@ -184,7 +186,7 @@ class BatchDedicatedResources(proto.Message): single machine. starting_replica_count (int): Immutable. The number of machine replicas used at the start - of the batch operation. If not set, AI Platform decides + of the batch operation. 
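The machine-resource messages discussed here compose as shown in this sketch; the machine type and replica bounds are placeholders, and omitting `starting_replica_count` would leave the starting size to Vertex AI, as the docstring notes:

```python
from google.cloud.aiplatform_v1beta1 import types

resources = types.BatchDedicatedResources(
    machine_spec=types.MachineSpec(machine_type="n1-standard-4"),
    starting_replica_count=2,
    max_replica_count=10,
)
```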
If not set, Vertex AI decides starting number, not greater than [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count] max_replica_count (int): diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_service.py b/google/cloud/aiplatform_v1beta1/types/metadata_service.py index 84b46c1bfd..12b3af0b0f 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_service.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_service.py @@ -285,26 +285,28 @@ class ListArtifactsRequest(proto.Message): define filter query is based on https://google.aip.dev/160. The supported set of filters include the following: - 1. Attributes filtering e.g. display_name = "test" - - Supported fields include: name, display_name, uri, state, - schema_title, create_time and update_time. Time fields, - i.e. create_time and update_time, require values to - specified in RFC-3339 format. e.g. create_time = - "2020-11-19T11:30:00-04:00" - - 2. Metadata field To filter on metadata fields use traversal - operation as follows: metadata.. - e.g. metadata.field_1.number_value = 10.0 - - 3. Context based filtering To filter Artifacts based on the - contexts to which they belong use the function operator - with the full resource name "in_context()" e.g. - in_context("projects//locations//metadataStores//contexts/") + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` + - **Context based filtering**: To filter Artifacts based on + the contexts to which they belong, use the function + operator with the full resource name + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` Each of the above supported filter types can be combined - together using Logical operators (AND & OR). e.g. - display_name = "test" AND metadata.field1.bool_value = true. + together using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. """ parent = proto.Field(proto.STRING, number=1,) @@ -381,7 +383,7 @@ class CreateContextRequest(proto.Message): Required. The Context to create. context_id (str): The {context} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. If not provided, the Context's ID will be a UUID generated by the service. Must be 4-128 characters in length. Valid characters are /[a-z][0-9]-/. Must be unique across all @@ -431,7 +433,40 @@ class ListContextsRequest(proto.Message): the call that provided the page token. (Otherwise the request will fail with INVALID_ARGUMENT error.) filter (str): + Filter specifying the boolean condition for the Contexts to + satisfy in order to be part of the result set. The syntax to + define filter query is based on https://google.aip.dev/160. + Following are the supported set of filters: + + - **Attribute filtering**: For example: + ``display_name = "test"``. 
Supported fields include: + ``name``, ``display_name``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such + as ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. + + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0``. + + - **Parent Child filtering**: To filter Contexts based on + parent-child relationship use the HAS operator as + follows: + + :: + + parent_contexts: + "projects//locations//metadataStores//contexts/" + child_contexts: + "projects//locations//metadataStores//contexts/" + + Each of the above supported filters can be combined together + using logical operators (``AND`` & ``OR``). + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. """ parent = proto.Field(proto.STRING, number=1,) @@ -526,9 +561,14 @@ class AddContextArtifactsAndExecutionsRequest(proto.Message): artifacts (Sequence[str]): The resource names of the Artifacts to attribute to the Context. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} executions (Sequence[str]): The resource names of the Executions to associate with the Context. + + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} """ context = proto.Field(proto.STRING, number=1,) @@ -549,7 +589,8 @@ class AddContextChildrenRequest(proto.Message): Attributes: context (str): Required. The resource name of the parent - Context. Format: + Context. + Format: projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} child_contexts (Sequence[str]): The resource names of the child Contexts. @@ -647,7 +688,7 @@ class ListExecutionsRequest(proto.Message): When paginating, all other provided parameters must match the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) + request will fail with an INVALID_ARGUMENT error.) filter (str): Filter specifying the boolean condition for the Executions to satisfy in order to be part of the result set. The syntax @@ -655,26 +696,26 @@ class ListExecutionsRequest(proto.Message): https://google.aip.dev/160. Following are the supported set of filters: - 1. Attributes filtering e.g. display_name = "test" - - supported fields include: name, display_name, state, - schema_title, create_time and update_time. Time fields, - i.e. create_time and update_time, require values to - specified in RFC-3339 format. e.g. create_time = - "2020-11-19T11:30:00-04:00" - - 2. Metadata field To filter on metadata fields use traversal - operation as follows: metadata.. - e.g. metadata.field_1.number_value = 10.0 - - 3. Context based filtering To filter Executions based on the - contexts to which they belong use the function operator - with the full resource name "in_context()" e.g. - in_context("projects//locations//metadataStores//contexts/") + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``state``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such + as ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. 
+ - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..`` For example: + ``metadata.field_1.number_value = 10.0`` + - **Context based filtering**: To filter Executions based + on the contexts to which they belong use the function + operator with the full resource name: + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` Each of the above supported filters can be combined together - using Logical operators (AND & OR). e.g. display_name = - "test" AND metadata.field1.bool_value = true. + using logical operators (``AND`` & ``OR``). For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. """ parent = proto.Field(proto.STRING, number=1,) @@ -839,7 +880,7 @@ class ListMetadataSchemasRequest(proto.Message): page_token (str): A page token, received from a previous [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas] - call. Provide this to retrieve the subsequent page. + call. Provide this to retrieve the next page. When paginating, all other provided parameters must match the call that provided the page token. (Otherwise the @@ -906,21 +947,23 @@ class QueryArtifactLineageSubgraphRequest(proto.Message): https://google.aip.dev/160. The supported set of filters include the following: - 1. Attributes filtering e.g. display_name = "test" - - supported fields include: name, display_name, uri, state, - schema_title, create_time and update_time. Time fields, - i.e. create_time and update_time, require values to - specified in RFC-3339 format. e.g. create_time = - "2020-11-19T11:30:00-04:00" - - 2. Metadata field To filter on metadata fields use traversal - operation as follows: metadata.. - e.g. metadata.field_1.number_value = 10.0 + - **Attribute filtering**: For example: + ``display_name = "test"`` Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` Each of the above supported filter types can be combined - together using Logical operators (AND & OR). e.g. - display_name = "test" AND metadata.field1.bool_value = true. + together using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. """ artifact = proto.Field(proto.STRING, number=1,) diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_store.py b/google/cloud/aiplatform_v1beta1/types/metadata_store.py index ac39fde5f0..efeec98f98 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_store.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_store.py @@ -39,10 +39,10 @@ class MetadataStore(proto.Message): Output only. Timestamp when this MetadataStore was last updated. encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for an + Customer-managed encryption key spec for a Metadata Store. If set, this Metadata Store and - all sub-resources of this Metadata Store will be - secured by this key. + all sub-resources of this Metadata Store are + secured using this key. description (str): Description of the MetadataStore. 
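As an aside on the filter documentation being rewritten above: the same AIP-160 grammar applies across the metadata listing RPCs. A minimal, hedged sketch of listing Artifacts with a combined attribute and metadata-field filter (the project, location, and metadata store IDs are hypothetical placeholders):

.. code-block:: python

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.MetadataServiceClient()

    # Combine an attribute clause and a metadata-field clause with AND,
    # following the AIP-160 grammar described in the docstrings above.
    request = aiplatform_v1beta1.ListArtifactsRequest(
        parent="projects/my-project/locations/us-central1/metadataStores/default",
        filter='display_name = "test" AND metadata.field1.bool_value = true',
    )
    for artifact in client.list_artifacts(request=request):
        print(artifact.name)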
state (google.cloud.aiplatform_v1beta1.types.MetadataStore.MetadataStoreState): @@ -51,7 +51,7 @@ class MetadataStore(proto.Message): """ class MetadataStoreState(proto.Message): - r"""Represent state information for a MetadataStore. + r"""Represents state information for a MetadataStore. Attributes: disk_utilization_bytes (int): The disk utilization of the MetadataStore in diff --git a/google/cloud/aiplatform_v1beta1/types/migration_service.py b/google/cloud/aiplatform_v1beta1/types/migration_service.py index 1bc2d2432a..4219a6a329 100644 --- a/google/cloud/aiplatform_v1beta1/types/migration_service.py +++ b/google/cloud/aiplatform_v1beta1/types/migration_service.py @@ -43,7 +43,7 @@ class SearchMigratableResourcesRequest(proto.Message): Attributes: parent (str): Required. The location that the migratable resources should - be searched from. It's the AI Platform location that the + be searched from. It's the Vertex AI location that the resources can be migrated to, not the resources' original location. Format: ``projects/{project}/locations/{location}`` @@ -128,28 +128,27 @@ class BatchMigrateResourcesRequest(proto.Message): class MigrateResourceRequest(proto.Message): r"""Config of migrating one resource from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to AI - Platform. + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. Attributes: migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig): Config for migrating Version in - ml.googleapis.com to AI Platform's Model. + ml.googleapis.com to Vertex AI's Model. migrate_automl_model_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateAutomlModelConfig): Config for migrating Model in - automl.googleapis.com to AI Platform's Model. + automl.googleapis.com to Vertex AI's Model. migrate_automl_dataset_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig): Config for migrating Dataset in - automl.googleapis.com to AI Platform's Dataset. + automl.googleapis.com to Vertex AI's Dataset. migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig): Config for migrating Dataset in - datalabeling.googleapis.com to AI Platform's + datalabeling.googleapis.com to Vertex AI's Dataset. """ class MigrateMlEngineModelVersionConfig(proto.Message): - r"""Config for migrating version in ml.googleapis.com to AI - Platform's Model. + r"""Config for migrating version in ml.googleapis.com to Vertex + AI's Model. Attributes: endpoint (str): @@ -168,8 +167,8 @@ class MigrateMlEngineModelVersionConfig(proto.Message): Format: ``projects/{project}/models/{model}/versions/{version}``. model_display_name (str): - Required. Display name of the model in AI - Platform. System will pick a display name if + Required. Display name of the model in Vertex + AI. System will pick a display name if unspecified. """ @@ -178,16 +177,16 @@ class MigrateMlEngineModelVersionConfig(proto.Message): model_display_name = proto.Field(proto.STRING, number=3,) class MigrateAutomlModelConfig(proto.Message): - r"""Config for migrating Model in automl.googleapis.com to AI - Platform's Model. + r"""Config for migrating Model in automl.googleapis.com to Vertex + AI's Model. Attributes: model (str): Required. Full resource name of automl Model. Format: ``projects/{project}/locations/{location}/models/{model}``. 
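To ground the migration config messages being renamed in this hunk, here is a sketch of one batch migration request; the resource names and display name are hypothetical, and only the AutoML-model variant of the request's oneof fields is shown:

.. code-block:: python

    from google.cloud.aiplatform_v1beta1.types import migration_service

    # One MigrateResourceRequest per resource being migrated; this one
    # uses the automl.googleapis.com Model variant.
    config = migration_service.MigrateResourceRequest(
        migrate_automl_model_config=migration_service.MigrateResourceRequest.MigrateAutomlModelConfig(
            model="projects/my-project/locations/us-central1/models/123",
            model_display_name="my-migrated-model",
        ),
    )
    request = migration_service.BatchMigrateResourcesRequest(
        parent="projects/my-project/locations/us-central1",
        migrate_resource_requests=[config],
    )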
model_display_name (str): - Optional. Display name of the model in AI - Platform. System will pick a display name if + Optional. Display name of the model in Vertex + AI. System will pick a display name if unspecified. """ @@ -195,16 +194,16 @@ class MigrateAutomlModelConfig(proto.Message): model_display_name = proto.Field(proto.STRING, number=2,) class MigrateAutomlDatasetConfig(proto.Message): - r"""Config for migrating Dataset in automl.googleapis.com to AI - Platform's Dataset. + r"""Config for migrating Dataset in automl.googleapis.com to + Vertex AI's Dataset. Attributes: dataset (str): Required. Full resource name of automl Dataset. Format: ``projects/{project}/locations/{location}/datasets/{dataset}``. dataset_display_name (str): - Required. Display name of the Dataset in AI - Platform. System will pick a display name if + Required. Display name of the Dataset in + Vertex AI. System will pick a display name if unspecified. """ @@ -220,20 +219,20 @@ class MigrateDataLabelingDatasetConfig(proto.Message): Required. Full resource name of data labeling Dataset. Format: ``projects/{project}/datasets/{dataset}``. dataset_display_name (str): - Optional. Display name of the Dataset in AI - Platform. System will pick a display name if + Optional. Display name of the Dataset in + Vertex AI. System will pick a display name if unspecified. migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]): Optional. Configs for migrating AnnotatedDataset in datalabeling.googleapis.com - to AI Platform's SavedQuery. The specified + to Vertex AI's SavedQuery. The specified AnnotatedDatasets have to belong to the datalabeling Dataset. """ class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message): r"""Config for migrating AnnotatedDataset in - datalabeling.googleapis.com to AI Platform's SavedQuery. + datalabeling.googleapis.com to Vertex AI's SavedQuery. Attributes: annotated_dataset (str): diff --git a/google/cloud/aiplatform_v1beta1/types/model.py b/google/cloud/aiplatform_v1beta1/types/model.py index 2363d22fe3..1742047247 100644 --- a/google/cloud/aiplatform_v1beta1/types/model.py +++ b/google/cloud/aiplatform_v1beta1/types/model.py @@ -52,13 +52,13 @@ class Model(proto.Message): that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by AI - Platform, if no additional metadata is needed, this field is - set to an empty string. Note: The URI given on output will - be immutable and probably different, including the URI - scheme, than the one given on input. The output URI will - point to a location where the user only has a read access. + Object `__. + AutoML Models always have this field populated by Vertex AI, + if no additional metadata is needed, this field is set to an + empty string. Note: The URI given on output will be + immutable and probably different, including the URI scheme, + than the one given on input. The output URI will point to a + location where the user only has a read access. metadata (google.protobuf.struct_pb2.Value): Immutable. 
An additional information about the Model; the schema of the metadata can be found in @@ -78,7 +78,7 @@ class Model(proto.Message): ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], and all binaries it contains are copied and stored - internally by AI Platform. Not present for AutoML Models. + internally by Vertex AI. Not present for AutoML Models. artifact_uri (str): Immutable. The path to the directory containing the Model artifact and any of its @@ -186,7 +186,7 @@ class Model(proto.Message): [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this Model was - uploaded into AI Platform. + uploaded into Vertex AI. update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this Model was most recently updated. @@ -348,12 +348,12 @@ class PredictSchemata(proto.Message): and [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by AI - Platform. Note: The URI given on output will be immutable - and probably different, including the URI scheme, than the - one given on input. The output URI will point to a location - where the user only has a read access. + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. parameters_schema_uri (str): Immutable. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and @@ -363,13 +363,13 @@ class PredictSchemata(proto.Message): and [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters]. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by AI - Platform, if no parameters are supported, then it is set to - an empty string. Note: The URI given on output will be - immutable and probably different, including the URI scheme, - than the one given on input. The output URI will point to a - location where the user only has a read access. + Object `__. + AutoML Models always have this field populated by Vertex AI, + if no parameters are supported, then it is set to an empty + string. Note: The URI given on output will be immutable and + probably different, including the URI scheme, than the one + given on input. The output URI will point to a location + where the user only has a read access. prediction_schema_uri (str): Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction @@ -379,12 +379,12 @@ class PredictSchemata(proto.Message): and [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by AI - Platform. Note: The URI given on output will be immutable - and probably different, including the URI scheme, than the - one given on input. The output URI will point to a location - where the user only has a read access. + Object `__. + AutoML Models always have this field populated by Vertex AI. 
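For orientation, the three schema URIs documented in this file hang off a Model via ``PredictSchemata``; a sketch with hypothetical Cloud Storage paths:

.. code-block:: python

    from google.cloud.aiplatform_v1beta1.types import model as gca_model

    # Each URI points to an OpenAPI 3.0.2 Schema Object; the bucket and
    # paths below are placeholders.
    my_model = gca_model.Model(
        display_name="my-model",
        predict_schemata=gca_model.PredictSchemata(
            instance_schema_uri="gs://my-bucket/schemata/instance.yaml",
            parameters_schema_uri="gs://my-bucket/schemata/parameters.yaml",
            prediction_schema_uri="gs://my-bucket/schemata/prediction.yaml",
        ),
    )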
+ Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. """ instance_schema_uri = proto.Field(proto.STRING, number=1,) @@ -394,19 +394,19 @@ class PredictSchemata(proto.Message): class ModelContainerSpec(proto.Message): r"""Specification of a container for serving predictions. Some fields in - this message correspond to fields in the Kubernetes Container v1 + this message correspond to fields in the `Kubernetes Container v1 core - `specification `__. + specification `__. Attributes: image_uri (str): Required. Immutable. URI of the Docker image to be used as the custom container for serving predictions. This URI must identify an image in Artifact Registry or Container - Registry. Learn more about the container publishing - requirements, including permissions requirements for the AI - Platform Service Agent, - `here `__. + Registry. Learn more about the `container publishing + requirements `__, + including permissions requirements for the AI Platform + Service Agent. The container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], @@ -415,11 +415,11 @@ class ModelContainerSpec(proto.Message): To learn about the requirements for the Docker image itself, see `Custom container - requirements `__. + requirements `__. - You can use the URI to one of AI Platform's `pre-built + You can use the URI to one of Vertex AI's `pre-built container images for - prediction `__ + prediction `__ in this field. command (Sequence[str]): Immutable. Specifies the command that runs when the @@ -436,20 +436,20 @@ class ModelContainerSpec(proto.Message): ```CMD`` `__, if either exists. If this field is not specified and the container does not have an ``ENTRYPOINT``, then refer to the - Docker documentation about how ``CMD`` and ``ENTRYPOINT`` - `interact `__. + Docker documentation about `how ``CMD`` and ``ENTRYPOINT`` + interact `__. If you specify this field, then you can also specify the ``args`` field to provide additional arguments for this command. However, if you specify this field, then the container's ``CMD`` is ignored. See the `Kubernetes - documentation `__ about how - the ``command`` and ``args`` fields interact with a - container's ``ENTRYPOINT`` and ``CMD``. + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. - In this field, you can reference environment variables `set - by AI - Platform `__ + In this field, you can reference `environment variables set + by Vertex + AI `__ and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field. You cannot reference environment variables set in the @@ -462,7 +462,7 @@ class ModelContainerSpec(proto.Message): this syntax with ``$$``; for example: $$(VARIABLE_NAME) This field corresponds to the ``command`` field of the Kubernetes Containers `v1 core - API `__. + API `__. args (Sequence[str]): Immutable. Specifies arguments for the command that runs when the container starts. This overrides the container's @@ -474,20 +474,21 @@ class ModelContainerSpec(proto.Message): [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] field, then the command from the ``command`` field runs without any additional arguments. 
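A compact sketch of a ``ModelContainerSpec`` exercising the ``command``/``args`` interaction described above, plus the ``env``, ``ports``, and route fields documented just below. The image URI is a placeholder, and ``AIP_HTTP_PORT`` is assumed here to be one of the service-set environment variables the docs reference:

.. code-block:: python

    from google.cloud.aiplatform_v1beta1.types import env_var
    from google.cloud.aiplatform_v1beta1.types import model as gca_model

    container = gca_model.ModelContainerSpec(
        image_uri="us-docker.pkg.dev/my-project/my-repo/my-server:latest",
        # Overrides the image ENTRYPOINT; args are passed to this command.
        command=["python3", "server.py"],
        # $(VAR) is expanded by the service; write $$( for a literal $(.
        args=["--port=$(AIP_HTTP_PORT)"],
        env=[env_var.EnvVar(name="MODEL_NAME", value="my-model")],
        # Only the first port is used for prediction traffic.
        ports=[gca_model.Port(container_port=8080)],
        predict_route="/foo",
        health_route="/bar",
    )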
See the `Kubernetes - documentation `__ about how - the ``command`` and ``args`` fields interact with a - container's ``ENTRYPOINT`` and ``CMD``. + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. If you don't specify this field and don't specify the ``command`` field, then the container's ```ENTRYPOINT`` `__ and ``CMD`` determine what runs based on their default - behavior. See the Docker documentation about how ``CMD`` and - ``ENTRYPOINT`` `interact `__. + behavior. See the Docker documentation about `how ``CMD`` + and ``ENTRYPOINT`` + interact `__. - In this field, you can reference environment variables `set - by AI - Platform `__ + In this field, you can reference `environment variables set + by Vertex + AI `__ and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field. You cannot reference environment variables set in the @@ -500,7 +501,7 @@ class ModelContainerSpec(proto.Message): this syntax with ``$$``; for example: $$(VARIABLE_NAME) This field corresponds to the ``args`` field of the Kubernetes Containers `v1 core - API `__. + API `__. env (Sequence[google.cloud.aiplatform_v1beta1.types.EnvVar]): Immutable. List of environment variables to set in the container. After the container starts running, code running @@ -533,14 +534,14 @@ class ModelContainerSpec(proto.Message): This field corresponds to the ``env`` field of the Kubernetes Containers `v1 core - API `__. + API `__. ports (Sequence[google.cloud.aiplatform_v1beta1.types.Port]): - Immutable. List of ports to expose from the container. AI - Platform sends any prediction requests that it receives to + Immutable. List of ports to expose from the container. + Vertex AI sends any prediction requests that it receives to the first port on this list. AI Platform also sends `liveness and health - checks `__ to - this port. + checks `__ + to this port. If you do not specify this field, it defaults to following value: @@ -553,20 +554,20 @@ class ModelContainerSpec(proto.Message): } ] - AI Platform does not use ports other than the first one + Vertex AI does not use ports other than the first one listed. This field corresponds to the ``ports`` field of the Kubernetes Containers `v1 core - API `__. + API `__. predict_route (str): Immutable. HTTP path on the container to send prediction - requests to. AI Platform forwards requests sent using + requests to. Vertex AI forwards requests sent using [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - to this path on the container's IP address and port. AI - Platform then returns the container's response in the API + to this path on the container's IP address and port. Vertex + AI then returns the container's response in the API response. - For example, if you set this field to ``/foo``, then when AI - Platform receives a prediction request, it forwards the + For example, if you set this field to ``/foo``, then when + Vertex AI receives a prediction request, it forwards the request body in a POST request to the ``/foo`` path on the port of your container specified by the first value of this ``ModelContainerSpec``'s @@ -581,28 +582,28 @@ class ModelContainerSpec(proto.Message): - ENDPOINT: The last segment (following ``endpoints/``)of the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (AI Platform makes this value + Model has been deployed. 
(Vertex AI makes this value available to your container code as the - ```AIP_ENDPOINT_ID`` `__ - environment variable.) + ```AIP_ENDPOINT_ID`` environment + variable `__.) - DEPLOYED_MODEL: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] - of the ``DeployedModel``. (AI Platform makes this value + of the ``DeployedModel``. (Vertex AI makes this value available to your container code as the ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) + variable `__.) health_route (str): Immutable. HTTP path on the container to send health checks - to. AI Platform intermittently sends GET requests to this - path on the container's IP address and port to check that - the container is healthy. Read more about `health - checks `__. - - For example, if you set this field to ``/bar``, then AI - Platform intermittently sends a GET request to the ``/bar`` - path on the port of your container specified by the first - value of this ``ModelContainerSpec``'s + to. Vertex AI intermittently sends GET requests to this path + on the container's IP address and port to check that the + container is healthy. Read more about `health + checks `__. + + For example, if you set this field to ``/bar``, then Vertex + AI intermittently sends a GET request to the ``/bar`` path + on the port of your container specified by the first value + of this ``ModelContainerSpec``'s [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field. @@ -614,17 +615,17 @@ class ModelContainerSpec(proto.Message): - ENDPOINT: The last segment (following ``endpoints/``)of the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (AI Platform makes this value + Model has been deployed. (Vertex AI makes this value available to your container code as the - ```AIP_ENDPOINT_ID`` `__ - environment variable.) + ```AIP_ENDPOINT_ID`` environment + variable `__.) - DEPLOYED_MODEL: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] - of the ``DeployedModel``. (AI Platform makes this value + of the ``DeployedModel``. (Vertex AI makes this value available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` `__ - environment variable.) + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) """ image_uri = proto.Field(proto.STRING, number=1,) diff --git a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py index 0a05808db9..256b06b283 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -113,7 +113,7 @@ class ModelDeploymentMonitoringJob(proto.Message): type) as prediction request/response. If there are any data type differences between predict instance and TFDV instance, this field can be used to override the schema. For models - trained with AI Platform, this field must be set as all the + trained with Vertex AI, this field must be set as all the fields in predict instance formatted as string. bigquery_tables (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable]): Output only. 
The created bigquery tables for diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py index d7ef59b8f9..cb3711881f 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py @@ -40,7 +40,7 @@ class ModelEvaluation(proto.Message): [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics] of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. + Object `__. metrics (google.protobuf.struct_pb2.Value): Output only. Evaluation metrics of the Model. The schema of the metrics is stored in diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py index 94066203c6..5850539095 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py @@ -42,7 +42,7 @@ class ModelEvaluationSlice(proto.Message): [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics] of this ModelEvaluationSlice. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. + Object `__. metrics (google.protobuf.struct_pb2.Value): Output only. Sliced evaluation metrics of the Model. The schema of the metrics is stored in diff --git a/google/cloud/aiplatform_v1beta1/types/model_service.py b/google/cloud/aiplatform_v1beta1/types/model_service.py index 9f94a8fed9..569fe3de91 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_service.py @@ -186,7 +186,7 @@ class UpdateModelRequest(proto.Message): update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - `FieldMask `__. + [google.protobuf.FieldMask][google.protobuf.FieldMask]. """ model = proto.Field(proto.MESSAGE, number=1, message=gca_model.Model,) diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py index 120d2ce71f..a050bbfd75 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py @@ -57,9 +57,7 @@ class PipelineJob(proto.Message): Output only. Timestamp when this PipelineJob was most recently updated. pipeline_spec (google.protobuf.struct_pb2.Struct): - Required. The spec of the pipeline. The spec contains a - ``schema_version`` field which indicates the Kubeflow - Pipeline schema version to decode the struct. + Required. The spec of the pipeline. state (google.cloud.aiplatform_v1beta1.types.PipelineState): Output only. The detailed state of the job. job_detail (google.cloud.aiplatform_v1beta1.types.PipelineJobDetail): @@ -108,8 +106,8 @@ class PipelineJob(proto.Message): Private services access must already be configured for the network. Pipeline job will apply the network configuration to the GCP resources being launched, if applied, such as - Cloud AI Platform Training or Dataflow job. If left - unspecified, the workload is not peered with any network. + Vertex AI Training or Dataflow job. If left unspecified, the + workload is not peered with any network. 
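Since several hunks in this patch swap the broken RST ``FieldMask`` links for proto cross-references, a quick sketch of how the mask is actually supplied on an update call; the model resource name is hypothetical:

.. code-block:: python

    from google.protobuf import field_mask_pb2

    from google.cloud.aiplatform_v1beta1.types import model as gca_model
    from google.cloud.aiplatform_v1beta1.types import model_service

    # Only the fields listed in the mask are written on the server.
    request = model_service.UpdateModelRequest(
        model=gca_model.Model(
            name="projects/my-project/locations/us-central1/models/123",
            display_name="renamed-model",
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )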
""" class RuntimeConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py index f2ba495371..a4c868011d 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -221,10 +221,11 @@ class ListPipelineJobsRequest(proto.Message): filter (str): The standard list filter. Supported fields: - - ``display_name`` supports = and !=. - - ``state`` supports = and !=. + - ``display_name`` supports ``=`` and ``!=``. + - ``state`` supports ``=`` and ``!=``. - Some examples of using the filter are: + The following examples demonstrate how to filter the list of + PipelineJobs: - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` diff --git a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py index 4313865bac..e01ed8a882 100644 --- a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py @@ -40,10 +40,10 @@ class TrainingPipeline(proto.Message): r"""The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may also - export data from AI Platform's Dataset which becomes the training + export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to AI Platform, and evaluate the Model. + the Model to Vertex AI, and evaluate the Model. Attributes: name (str): @@ -53,7 +53,7 @@ class TrainingPipeline(proto.Message): Required. The user-defined name of this TrainingPipeline. input_data_config (google.cloud.aiplatform_v1beta1.types.InputDataConfig): - Specifies AI Platform owned input data that may be used for + Specifies Vertex AI owned input data that may be used for training the Model. The TrainingPipeline's [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] should make clear whether this config is used and if there @@ -105,10 +105,10 @@ class TrainingPipeline(proto.Message): does not support uploading a Model as part of the pipeline. When the Pipeline's state becomes ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been - uploaded into AI Platform, then the model_to_upload's - resource [name][google.cloud.aiplatform.v1beta1.Model.name] - is populated. The Model is always uploaded into the Project - and Location in which this pipeline is. + uploaded into Vertex AI, then the model_to_upload's resource + [name][google.cloud.aiplatform.v1beta1.Model.name] is + populated. The Model is always uploaded into the Project and + Location in which this pipeline is. state (google.cloud.aiplatform_v1beta1.types.PipelineState): Output only. The detailed state of the pipeline. @@ -176,8 +176,8 @@ class TrainingPipeline(proto.Message): class InputDataConfig(proto.Message): - r"""Specifies AI Platform owned input data to be used for - training, and possibly evaluating, the Model. + r"""Specifies Vertex AI owned input data to be used for training, + and possibly evaluating, the Model. Attributes: fraction_split (google.cloud.aiplatform_v1beta1.types.FractionSplit): @@ -202,7 +202,7 @@ class InputDataConfig(proto.Message): format. All training input data is written into that directory. 
- The AI Platform environment variables representing Cloud + The Vertex AI environment variables representing Cloud Storage data URIs are represented in the Cloud Storage wildcard format to support sharded data. e.g.: "gs://.../training-*.jsonl" @@ -259,8 +259,8 @@ class InputDataConfig(proto.Message): ignored by the split method are used in respectively training, validation or test role, depending on the role of the DataItem they are on (for the auto-assigned that role is - decided by AI Platform). A filter with same syntax as the - one used in + decided by Vertex AI). A filter with same syntax as the one + used in [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations] may be used, but note here it filters across all Annotations of the Dataset, and not just within a single DataItem. @@ -271,8 +271,8 @@ class InputDataConfig(proto.Message): Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. The - schema files that can be used here are found in + Object `__. + The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the chosen schema must be consistent with [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] @@ -320,7 +320,7 @@ class FractionSplit(proto.Message): the given fractions. Any of ``training_fraction``, ``validation_fraction`` and ``test_fraction`` may optionally be provided, they must sum to up to 1. If the provided ones sum to less - than 1, the remainder is assigned to sets as decided by AI Platform. + than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test. diff --git a/noxfile.py b/noxfile.py index cd85c2b17e..b46cf43a0f 100644 --- a/noxfile.py +++ b/noxfile.py @@ -140,7 +140,7 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path) - session.install("-e", ".", "-c", constraints_path) + session.install("-e", ".[testing]", "-c", constraints_path) # Run py.test against the system tests. if system_test_exists: @@ -169,7 +169,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. """ session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=95") + session.run("coverage", "report", "--show-missing", "--fail-under=99") session.run("coverage", "erase") diff --git a/owlbot.py b/owlbot.py new file mode 100644 index 0000000000..6e2a1e8a0b --- /dev/null +++ b/owlbot.py @@ -0,0 +1,97 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""This script is used to synthesize generated parts of this library.""" + +import os + +import synthtool as s +import synthtool.gcp as gcp +from synthtool.languages import python + +common = gcp.CommonTemplates() + +default_version = "v1" + +for library in s.get_staging_dirs(default_version): + # --------------------------------------------------------------------- + # Patch each version of the library + # --------------------------------------------------------------------- + + # https://github.com/googleapis/gapic-generator-python/issues/413 + s.replace( + library / f"google/cloud/aiplatform_{library.name}/services/prediction_service/client.py", + "request.instances = instances", + "request.instances.extend(instances)", + ) + + # https://github.com/googleapis/gapic-generator-python/issues/672 + s.replace( + library / f"google/cloud/aiplatform_{library.name}/services/endpoint_service/client.py", + "request.traffic_split.extend\(traffic_split\)", + "request.traffic_split = traffic_split", + ) + + s.move( + library, + excludes=[ + ".pre-commit-config.yaml", + "setup.py", + "README.rst", + "docs/index.rst", + f"docs/definition_{library.name}/services.rst", + f"docs/instance_{library.name}/services.rst", + f"docs/params_{library.name}/services.rst", + f"docs/prediction_{library.name}/services.rst", + f"scripts/fixup_aiplatform_{library.name}_keywords.py", + f"scripts/fixup_definition_{library.name}_keywords.py", + f"scripts/fixup_instance_{library.name}_keywords.py", + f"scripts/fixup_params_{library.name}_keywords.py", + f"scripts/fixup_prediction_{library.name}_keywords.py", + "google/cloud/aiplatform/__init__.py", + f"google/cloud/aiplatform/{library.name}/schema/**/services/", + f"tests/unit/gapic/aiplatform_{library.name}/test_prediction_service.py", + f"tests/unit/gapic/definition_{library.name}/", + f"tests/unit/gapic/instance_{library.name}/", + f"tests/unit/gapic/params_{library.name}/", + f"tests/unit/gapic/prediction_{library.name}/", + ], + ) + +s.remove_staging_dirs() + +# ---------------------------------------------------------------------------- +# Add templated files +# ---------------------------------------------------------------------------- + +templated_files = common.py_library(cov_level=99, microgenerator=True) +s.move( + templated_files, + excludes=[ + ".coveragerc", + ".kokoro/samples/**" + ] +) # the microgenerator has a good coveragerc file + +# Don't treat docs warnings as errors +s.replace("noxfile.py", """["']-W["'], # warnings as errors""", "") + +# Replacement to install extra testing dependencies +s.replace( + "noxfile.py", + """session.install\("-e", ".", "-c", constraints_path\)""", + """session.install("-e", ".[testing]", "-c", constraints_path)""" +) + +s.shell.run(["nox", "-s", "blacken"], hide_output=False) \ No newline at end of file diff --git a/schema/predict/instance/MANIFEST.in b/schema/predict/instance/MANIFEST.in new file mode 100644 index 0000000000..05ea9a66cb --- /dev/null +++ b/schema/predict/instance/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/cloud/aiplatform/v1/schema/predict/instance *.py +recursive-include google/cloud/aiplatform/v1/schema/predict/instance_v1 *.py diff --git a/schema/predict/instance/README.rst b/schema/predict/instance/README.rst new file mode 100644 index 0000000000..5e0175e9e4 --- /dev/null +++ b/schema/predict/instance/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Cloud Aiplatform V1 Schema Predict Instance API +================================================= + +Quick Start +----------- + +In 
order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Cloud Aiplatform V1 Schema Predict Instance API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv + source /bin/activate + /bin/pip install /path/to/library + + +Windows +^^^^^^^ + +.. code-block:: console + + python3 -m venv + \Scripts\activate + \Scripts\pip.exe install \path\to\library diff --git a/schema/predict/instance/docs/conf.py b/schema/predict/instance/docs/conf.py new file mode 100644 index 0000000000..a12fb006b9 --- /dev/null +++ b/schema/predict/instance/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# google-cloud-aiplatform-v1-schema-predict-instance documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "1.6.3" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGELOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffixes as a list of strings: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = "index" + +# General information about the project. +project = u"google-cloud-aiplatform-v1-schema-predict-instance" +copyright = u"2020, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation.
+html_theme_options = { + "description": "Google Cloud Aiplatform V1 Schema Predict Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. 
+htmlhelp_basename = "google-cloud-aiplatform-v1-schema-predict-instance-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-instance.tex", + u"google-cloud-aiplatform-v1-schema-predict-instance Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-instance", + u"Google Cloud Aiplatform V1 Schema Predict Instance Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-instance", + u"google-cloud-aiplatform-v1-schema-predict-instance Documentation", + author, + "google-cloud-aiplatform-v1-schema-predict-instance", + "GAPIC library for Google Cloud Aiplatform V1 Schema Predict Instance API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/schema/predict/instance/docs/index.rst b/schema/predict/instance/docs/index.rst new file mode 100644 index 0000000000..e6c2d156ca --- /dev/null +++ b/schema/predict/instance/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + instance_v1/services + instance_v1/types diff --git a/schema/predict/instance/docs/instance_v1/services.rst b/schema/predict/instance/docs/instance_v1/services.rst new file mode 100644 index 0000000000..50c011c69a --- /dev/null +++ b/schema/predict/instance/docs/instance_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Predict Instance v1 API +====================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/schema/predict/instance/docs/instance_v1/types.rst b/schema/predict/instance/docs/instance_v1/types.rst new file mode 100644 index 0000000000..564ab013ee --- /dev/null +++ b/schema/predict/instance/docs/instance_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Predict Instance v1 API +=================================================================== + +.. automodule:: google.cloud.aiplatform.v1.schema.predict.instance_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/schema/predict/instance/docs/instance_v1beta1/services.rst b/schema/predict/instance/docs/instance_v1beta1/services.rst new file mode 100644 index 0000000000..941dbcca59 --- /dev/null +++ b/schema/predict/instance/docs/instance_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API +================================================================================ +.. toctree:: + :maxdepth: 2 diff --git a/schema/predict/instance/docs/instance_v1beta1/types.rst b/schema/predict/instance/docs/instance_v1beta1/types.rst new file mode 100644 index 0000000000..7caa088065 --- /dev/null +++ b/schema/predict/instance/docs/instance_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API +============================================================================= + +.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py new file mode 100644 index 0000000000..41d6704c1f --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_classification import ImageClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_object_detection import ImageObjectDetectionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_segmentation import ImageSegmentationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_classification import TextClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_extraction import TextExtractionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_sentiment import TextSentimentPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_action_recognition import VideoActionRecognitionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_classification import VideoClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ('ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/py.typed b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/py.typed new file mode 100644 index 0000000000..f70e7f605a --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. 
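As a usage note for the unversioned v1 package added above: a minimal sketch of constructing one of these instance types, assuming the generated package is installed; the bucket URI and file name are placeholders, not taken from this change.

    from google.cloud.aiplatform.v1.schema.predict.instance import (
        ImageClassificationPredictionInstance,
    )

    # proto-plus messages accept their fields as keyword arguments.
    instance = ImageClassificationPredictionInstance(
        content="gs://example-bucket/cat.jpeg",  # placeholder GCS URI
        mime_type="image/jpeg",
    )

    # Serialization goes through classmethods on the message type.
    print(ImageClassificationPredictionInstance.to_json(instance))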
diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py new file mode 100644 index 0000000000..41ab5407a7 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionInstance +from .types.image_object_detection import ImageObjectDetectionPredictionInstance +from .types.image_segmentation import ImageSegmentationPredictionInstance +from .types.text_classification import TextClassificationPredictionInstance +from .types.text_extraction import TextExtractionPredictionInstance +from .types.text_sentiment import TextSentimentPredictionInstance +from .types.video_action_recognition import VideoActionRecognitionPredictionInstance +from .types.video_classification import VideoClassificationPredictionInstance +from .types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ( +'ImageClassificationPredictionInstance', +'ImageObjectDetectionPredictionInstance', +'ImageSegmentationPredictionInstance', +'TextClassificationPredictionInstance', +'TextExtractionPredictionInstance', +'TextSentimentPredictionInstance', +'VideoActionRecognitionPredictionInstance', +'VideoClassificationPredictionInstance', +'VideoObjectTrackingPredictionInstance', +) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json new file mode 100644 index 0000000000..0ae909d6ea --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.instance_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.instance", + "schema": "1.0" +} diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed new file mode 100644 index 0000000000..f70e7f605a --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. 
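The gapic_metadata.json above records that the proto package google.cloud.aiplatform.v1.schema.predict.instance is served by the library package instance_v1, and the unversioned package simply re-exports the versioned types. A small sanity check of that aliasing, assuming both packages are importable:

    from google.cloud.aiplatform.v1.schema.predict import instance
    from google.cloud.aiplatform.v1.schema.predict import instance_v1

    # Both namespaces resolve to the same proto-plus class objects, since
    # the unversioned __init__.py imports from instance_v1.types.
    assert (
        instance.TextSentimentPredictionInstance
        is instance_v1.TextSentimentPredictionInstance
    )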
diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py new file mode 100644 index 0000000000..80a5332604 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .image_classification import ( + ImageClassificationPredictionInstance, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from .image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from .text_classification import ( + TextClassificationPredictionInstance, +) +from .text_extraction import ( + TextExtractionPredictionInstance, +) +from .text_sentiment import ( + TextSentimentPredictionInstance, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from .video_classification import ( + VideoClassificationPredictionInstance, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) + +__all__ = ( + 'ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py new file mode 100644 index 0000000000..94f46a1af3 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'ImageClassificationPredictionInstance', + }, +) + + +class ImageClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Image Classification. + Attributes: + content (str): + The image bytes or GCS URI to make the + prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. 
- image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py new file mode 100644 index 0000000000..bd250ab219 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'ImageObjectDetectionPredictionInstance', + }, +) + + +class ImageObjectDetectionPredictionInstance(proto.Message): + r"""Prediction input format for Image Object Detection. + Attributes: + content (str): + The image bytes or GCS URI to make the + prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py new file mode 100644 index 0000000000..f967807e6c --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'ImageSegmentationPredictionInstance', + }, +) + + +class ImageSegmentationPredictionInstance(proto.Message): + r"""Prediction input format for Image Segmentation. 
+ Attributes: + content (str): + The image bytes to make the predictions on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/png + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py new file mode 100644 index 0000000000..4eec13516c --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'TextClassificationPredictionInstance', + }, +) + + +class TextClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Text Classification. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py new file mode 100644 index 0000000000..a52c7df050 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'TextExtractionPredictionInstance', + }, +) + + +class TextExtractionPredictionInstance(proto.Message): + r"""Prediction input format for Text Extraction. + Attributes: + content (str): + The text snippet to make the predictions on. 
+ mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + key (str): + This field is only used for batch prediction. + If a key is provided, the batch prediction + result will be mapped to this key. If omitted, + then the batch prediction result will contain + the entire input instance. AI Platform will not + check if keys in the request are duplicates, so + it is up to the caller to ensure the keys are + unique. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + key = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py new file mode 100644 index 0000000000..5bdfe5d5ba --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'TextSentimentPredictionInstance', + }, +) + + +class TextSentimentPredictionInstance(proto.Message): + r"""Prediction input format for Text Sentiment. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py new file mode 100644 index 0000000000..d53782868f --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoActionRecognitionPredictionInstance', + }, +) + + +class VideoActionRecognitionPredictionInstance(proto.Message): + r"""Prediction input format for Video Action Recognition. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py new file mode 100644 index 0000000000..b51ab464a4 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoClassificationPredictionInstance', + }, +) + + +class VideoClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Video Classification. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. 
+ Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py new file mode 100644 index 0000000000..8b96f75fd2 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoObjectTrackingPredictionInstance', + }, +) + + +class VideoObjectTrackingPredictionInstance(proto.Message): + r"""Prediction input format for Video Object Tracking. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. 
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py new file mode 100644 index 0000000000..5f9e065de0 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ImageClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_segmentation import ImageSegmentationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_classification import TextClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_extraction import TextExtractionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_sentiment import TextSentimentPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_classification import VideoClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ('ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. 
diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py new file mode 100644 index 0000000000..41ab5407a7 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionInstance +from .types.image_object_detection import ImageObjectDetectionPredictionInstance +from .types.image_segmentation import ImageSegmentationPredictionInstance +from .types.text_classification import TextClassificationPredictionInstance +from .types.text_extraction import TextExtractionPredictionInstance +from .types.text_sentiment import TextSentimentPredictionInstance +from .types.video_action_recognition import VideoActionRecognitionPredictionInstance +from .types.video_classification import VideoClassificationPredictionInstance +from .types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ( +'ImageClassificationPredictionInstance', +'ImageObjectDetectionPredictionInstance', +'ImageSegmentationPredictionInstance', +'TextClassificationPredictionInstance', +'TextExtractionPredictionInstance', +'TextSentimentPredictionInstance', +'VideoActionRecognitionPredictionInstance', +'VideoClassificationPredictionInstance', +'VideoObjectTrackingPredictionInstance', +) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..38379e8208 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance", + "schema": "1.0" +} diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. 
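TextExtractionPredictionInstance (defined later in this diff for v1beta1) carries an optional key that maps a batch prediction result back to its input; the caller is responsible for keeping keys unique. A minimal sketch with hypothetical values:

    from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1 import (
        TextExtractionPredictionInstance,
    )

    instance = TextExtractionPredictionInstance(
        content="AI Platform supports batch prediction.",  # sample text
        mime_type="text/plain",
        key="row-0001",  # hypothetical caller-chosen identifier
    )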
diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py new file mode 100644 index 0000000000..80a5332604 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .image_classification import ( + ImageClassificationPredictionInstance, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from .image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from .text_classification import ( + TextClassificationPredictionInstance, +) +from .text_extraction import ( + TextExtractionPredictionInstance, +) +from .text_sentiment import ( + TextSentimentPredictionInstance, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from .video_classification import ( + VideoClassificationPredictionInstance, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) + +__all__ = ( + 'ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..c85d4a96cd --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'ImageClassificationPredictionInstance', + }, +) + + +class ImageClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Image Classification. + Attributes: + content (str): + The image bytes or GCS URI to make the + prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. 
- image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..d9895e3372 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'ImageObjectDetectionPredictionInstance', + }, +) + + +class ImageObjectDetectionPredictionInstance(proto.Message): + r"""Prediction input format for Image Object Detection. + Attributes: + content (str): + The image bytes or GCS URI to make the + prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..e1b5cfc21f --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'ImageSegmentationPredictionInstance', + }, +) + + +class ImageSegmentationPredictionInstance(proto.Message): + r"""Prediction input format for Image Segmentation. + Attributes: + content (str): + The image bytes to make the predictions on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/png + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py new file mode 100644 index 0000000000..0c1ea43a72 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'TextClassificationPredictionInstance', + }, +) + + +class TextClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Text Classification. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..0b1304d1c3 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'TextExtractionPredictionInstance', + }, +) + + +class TextExtractionPredictionInstance(proto.Message): + r"""Prediction input format for Text Extraction. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + key (str): + This field is only used for batch prediction. + If a key is provided, the batch prediction + result will be mapped to this key. If omitted, + then the batch prediction result will contain + the entire input instance. AI Platform will not + check if keys in the request are duplicates, so + it is up to the caller to ensure the keys are + unique. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + key = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py new file mode 100644 index 0000000000..ab416779b6 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'TextSentimentPredictionInstance', + }, +) + + +class TextSentimentPredictionInstance(proto.Message): + r"""Prediction input format for Text Sentiment. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..c7a76efda2 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoActionRecognitionPredictionInstance', + }, +) + + +class VideoActionRecognitionPredictionInstance(proto.Message): + r"""Prediction input format for Video Action Recognition. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..56d662ef88 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoClassificationPredictionInstance', + }, +) + + +class VideoClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Video Classification. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. 
+ Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..7344d419a8 --- /dev/null +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoObjectTrackingPredictionInstance', + }, +) + + +class VideoObjectTrackingPredictionInstance(proto.Message): + r"""Prediction input format for Video Object Tracking. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. 
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/instance/mypy.ini b/schema/predict/instance/mypy.ini new file mode 100644 index 0000000000..4505b48543 --- /dev/null +++ b/schema/predict/instance/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/schema/predict/instance/noxfile.py b/schema/predict/instance/noxfile.py new file mode 100644 index 0000000000..f9e24efc02 --- /dev/null +++ b/schema/predict/instance/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds" + # exclude update_lower_bounds from default + "docs", +] + +@nox.session(python=['3.6', '3.7', '3.8', '3.9']) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/aiplatform/v1/schema/predict/instance_v1/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python='3.7') +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. 
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/schema/predict/instance/scripts/fixup_instance_v1_keywords.py b/schema/predict/instance/scripts/fixup_instance_v1_keywords.py new file mode 100644 index 0000000000..d9e8bd0b17 --- /dev/null +++ b/schema/predict/instance/scripts/fixup_instance_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class instanceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. 
+        # Therefore, all positional args must map to the first parameters.
+        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
+        if any(k.keyword.value == "request" for k in kwargs):
+            # We've already fixed this file, don't fix it again.
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=instanceCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the instance client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool makes a best-effort attempt to convert positional
+      parameters in client method calls to keyword-based parameters.
+      Cases where it WILL FAIL include
+      A) * or ** expansion in a method call.
+      B) Calls via function or method alias (includes free function calls)
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+      These all constitute false negatives. The tool can also produce false
+      positives when an API method shares a name with another method.
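+
+Example invocation (the paths are illustrative, not generated defaults):
+
+    python fixup_instance_v1_keywords.py -d ./my_old_code -o ./my_fixed_code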
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py b/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py new file mode 100644 index 0000000000..d9e8bd0b17 --- /dev/null +++ b/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class instanceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=instanceCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the instance client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool makes a best-effort attempt to convert positional
+      parameters in client method calls to keyword-based parameters.
+      Cases where it WILL FAIL include
+      A) * or ** expansion in a method call.
+      B) Calls via function or method alias (includes free function calls)
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+      These all constitute false negatives. The tool can also produce false
+      positives when an API method shares a name with another method.
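+
+Example invocation (the paths are illustrative, not generated defaults):
+
+    python fixup_instance_v1beta1_keywords.py -d ./my_old_code -o ./my_fixed_code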
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/schema/predict/instance/setup.py b/schema/predict/instance/setup.py new file mode 100644 index 0000000000..42a274ed13 --- /dev/null +++ b/schema/predict/instance/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1-schema-predict-instance', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.predict'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.22.2, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/schema/predict/instance/tests/__init__.py b/schema/predict/instance/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/instance/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/instance/tests/unit/__init__.py b/schema/predict/instance/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/instance/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/instance/tests/unit/gapic/__init__.py b/schema/predict/instance/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/instance/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/instance/tests/unit/gapic/instance_v1/__init__.py b/schema/predict/instance/tests/unit/gapic/instance_v1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/instance/tests/unit/gapic/instance_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
diff --git a/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py b/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py
new file mode 100644
index 0000000000..b54a5fcc42
--- /dev/null
+++ b/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py
@@ -0,0 +1,16 @@
+
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/schema/predict/params/MANIFEST.in b/schema/predict/params/MANIFEST.in
new file mode 100644
index 0000000000..b990fba651
--- /dev/null
+++ b/schema/predict/params/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include google/cloud/aiplatform/v1/schema/predict/params *.py
+recursive-include google/cloud/aiplatform/v1/schema/predict/params_v1 *.py
diff --git a/schema/predict/params/README.rst b/schema/predict/params/README.rst
new file mode 100644
index 0000000000..60be3b2705
--- /dev/null
+++ b/schema/predict/params/README.rst
@@ -0,0 +1,49 @@
+Python Client for Google Cloud Aiplatform V1 Schema Predict Params API
+======================================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Aiplatform V1 Schema Predict Params API.
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/schema/predict/params/docs/conf.py b/schema/predict/params/docs/conf.py
new file mode 100644
index 0000000000..6917071403
--- /dev/null
+++ b/schema/predict/params/docs/conf.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# google-cloud-aiplatform-v1-schema-predict-params documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+__version__ = "0.1.0"
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.6.3"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.todo",
+    "sphinx.ext.viewcode",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_flags = ["members"]
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# Allow markdown includes (so releases.md can include CHANGELOG.md)
+# http://www.sphinx-doc.org/en/master/markdown.html
+source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = u"google-cloud-aiplatform-v1-schema-predict-params"
+copyright = u"2020, Google, LLC"
+author = u"Google APIs"  # TODO: autogenerate this bit
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
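+# (For example, adding ".ipynb_checkpoints" to this list would also skip
+# notebook checkpoint directories; only "_build" is excluded below.)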
+exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Aiplatform V1 Schema Predict Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. 
+# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-aiplatform-v1-schema-predict-params-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-params.tex", + u"google-cloud-aiplatform-v1-schema-predict-params Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [
+    (
+        master_doc,
+        "google-cloud-aiplatform-v1-schema-predict-params",
+        u"Google Cloud Aiplatform V1 Schema Predict Params Documentation",
+        [author],
+        1,
+    )
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (
+        master_doc,
+        "google-cloud-aiplatform-v1-schema-predict-params",
+        u"google-cloud-aiplatform-v1-schema-predict-params Documentation",
+        author,
+        "google-cloud-aiplatform-v1-schema-predict-params",
+        "GAPIC library for Google Cloud Aiplatform V1 Schema Predict Params API",
+        "APIs",
+    )
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+    "python": ("http://python.readthedocs.org/en/latest/", None),
+    "gax": ("https://gax-python.readthedocs.org/en/latest/", None),
+    "google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
+    "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
+    "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
+    "grpc": ("https://grpc.io/grpc/python/", None),
+    "requests": ("http://requests.kennethreitz.org/en/stable/", None),
+    "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None),
+    "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
+}
+
+
+# Napoleon settings
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True
+napoleon_use_admonition_for_examples = False
+napoleon_use_admonition_for_notes = False
+napoleon_use_admonition_for_references = False
+napoleon_use_ivar = False
+napoleon_use_param = True
+napoleon_use_rtype = True
diff --git a/schema/predict/params/docs/index.rst b/schema/predict/params/docs/index.rst
new file mode 100644
index 0000000000..e90a1a5814
--- /dev/null
+++ b/schema/predict/params/docs/index.rst
@@ -0,0 +1,7 @@
+API Reference
+-------------
+.. toctree::
+    :maxdepth: 2
+
+    params_v1/services
+    params_v1/types
diff --git a/schema/predict/params/docs/params_v1/services.rst b/schema/predict/params/docs/params_v1/services.rst
new file mode 100644
index 0000000000..bf08ea6e98
--- /dev/null
+++ b/schema/predict/params/docs/params_v1/services.rst
@@ -0,0 +1,4 @@
+Services for Google Cloud Aiplatform V1 Schema Predict Params v1 API
+====================================================================
+.. toctree::
+    :maxdepth: 2
diff --git a/schema/predict/params/docs/params_v1/types.rst b/schema/predict/params/docs/params_v1/types.rst
new file mode 100644
index 0000000000..956ef5224d
--- /dev/null
+++ b/schema/predict/params/docs/params_v1/types.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Aiplatform V1 Schema Predict Params v1 API
+=================================================================
+
+.. automodule:: google.cloud.aiplatform.v1.schema.predict.params_v1.types
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/schema/predict/params/docs/params_v1beta1/services.rst b/schema/predict/params/docs/params_v1beta1/services.rst
new file mode 100644
index 0000000000..b3b897a0f4
--- /dev/null
+++ b/schema/predict/params/docs/params_v1beta1/services.rst
@@ -0,0 +1,4 @@
+Services for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API
+==============================================================================
+.. toctree::
+    :maxdepth: 2
diff --git a/schema/predict/params/docs/params_v1beta1/types.rst b/schema/predict/params/docs/params_v1beta1/types.rst
new file mode 100644
index 0000000000..722a1d8ba0
--- /dev/null
+++ b/schema/predict/params/docs/params_v1beta1/types.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API
+===========================================================================
+
+.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/__init__.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/__init__.py
new file mode 100644
index 0000000000..91ae7f0d5c
--- /dev/null
+++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/__init__.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_classification import ImageClassificationPredictionParams
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_object_detection import ImageObjectDetectionPredictionParams
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_segmentation import ImageSegmentationPredictionParams
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_action_recognition import VideoActionRecognitionPredictionParams
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_classification import VideoClassificationPredictionParams
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_object_tracking import VideoObjectTrackingPredictionParams
+
+__all__ = ('ImageClassificationPredictionParams',
+    'ImageObjectDetectionPredictionParams',
+    'ImageSegmentationPredictionParams',
+    'VideoActionRecognitionPredictionParams',
+    'VideoClassificationPredictionParams',
+    'VideoObjectTrackingPredictionParams',
+)
diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/py.typed b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/py.typed
new file mode 100644
index 0000000000..df96e61590
--- /dev/null
+++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py new file mode 100644 index 0000000000..91b718b437 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionParams +from .types.image_object_detection import ImageObjectDetectionPredictionParams +from .types.image_segmentation import ImageSegmentationPredictionParams +from .types.video_action_recognition import VideoActionRecognitionPredictionParams +from .types.video_classification import VideoClassificationPredictionParams +from .types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ( +'ImageClassificationPredictionParams', +'ImageObjectDetectionPredictionParams', +'ImageSegmentationPredictionParams', +'VideoActionRecognitionPredictionParams', +'VideoClassificationPredictionParams', +'VideoObjectTrackingPredictionParams', +) diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json new file mode 100644 index 0000000000..edfffb441b --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.params_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.params", + "schema": "1.0" +} diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed new file mode 100644 index 0000000000..df96e61590 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py new file mode 100644 index 0000000000..70a92bb59c --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionParams, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from .image_segmentation import ( + ImageSegmentationPredictionParams, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from .video_classification import ( + VideoClassificationPredictionParams, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) + +__all__ = ( + 'ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py new file mode 100644 index 0000000000..1668600544 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageClassificationPredictionParams', + }, +) + + +class ImageClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Classification. 
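+
+    (Editorial illustration, not part of the generated proto comment:
+    confidence_threshold=0.5 with max_predictions=3 returns at most the
+    three highest-scoring labels whose scores are at least 0.5.)
+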
+ Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is 10. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py new file mode 100644 index 0000000000..43c7814607 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageObjectDetectionPredictionParams', + }, +) + + +class ImageObjectDetectionPredictionParams(proto.Message): + r"""Prediction model parameters for Image Object Detection. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + Note that number of returned predictions is also + limited by metadata's predictionsLimit. Default + value is 10. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py new file mode 100644 index 0000000000..695a3a7745 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageSegmentationPredictionParams', + }, +) + + +class ImageSegmentationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Segmentation. + Attributes: + confidence_threshold (float): + When the model predicts category of pixels of + the image, it will only provide predictions for + pixels that it is at least this much confident + about. All other pixels will be classified as + background. Default value is 0.5. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py new file mode 100644 index 0000000000..88e714e9cf --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'VideoActionRecognitionPredictionParams', + }, +) + + +class VideoActionRecognitionPredictionParams(proto.Message): + r"""Prediction model parameters for Video Action Recognition. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py new file mode 100644 index 0000000000..4f57fe0d3c --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'VideoClassificationPredictionParams', + }, +) + + +class VideoClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Video Classification. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is + 10,000. + segment_classification (bool): + Set to true to request segment-level + classification. AI Platform returns labels and + their confidence scores for the entire time + segment of the video that user specified in the + input instance. Default value is true + shot_classification (bool): + Set to true to request shot-level + classification. AI Platform determines the + boundaries for each camera shot in the entire + time segment of the video that user specified in + the input instance. AI Platform then returns + labels and their confidence scores for each + detected shot, along with the start and end time + of the shot. + WARNING: Model evaluation is not done for this + classification type, the quality of it depends + on the training data, but there are no metrics + provided to describe that quality. + Default value is false + one_sec_interval_classification (bool): + Set to true to request classification for a + video at one-second intervals. AI Platform + returns labels and their confidence scores for + each second of the entire time segment of the + video that user specified in the input WARNING: + Model evaluation is not done for this + classification type, the quality of it depends + on the training data, but there are no metrics + provided to describe that quality. Default value + is false + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + segment_classification = proto.Field( + proto.BOOL, + number=3, + ) + shot_classification = proto.Field( + proto.BOOL, + number=4, + ) + one_sec_interval_classification = proto.Field( + proto.BOOL, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py new file mode 100644 index 0000000000..820a73e3c6 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'VideoObjectTrackingPredictionParams', + }, +) + + +class VideoObjectTrackingPredictionParams(proto.Message): + r"""Prediction model parameters for Video Object Tracking. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + min_bounding_box_size (float): + Only bounding boxes with shortest edge at + least that long as a relative value of video + frame size are returned. Default value is 0.0. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + min_bounding_box_size = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py new file mode 100644 index 0000000000..464c39f26c --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ImageClassificationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_segmentation import ImageSegmentationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_classification import VideoClassificationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ('ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py new file mode 100644 index 0000000000..91b718b437 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
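Editor's note: the unversioned params package above simply re-exports the v1beta1 types, so both import paths below resolve to the same proto-plus class objects. A sketch, assuming the packages are installed as laid out in this diff:

    from google.cloud.aiplatform.v1beta1.schema.predict import params
    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1

    # Both names refer to the same class, since the unversioned __init__
    # imports straight from params_v1beta1.types.
    assert (
        params.ImageClassificationPredictionParams
        is params_v1beta1.ImageClassificationPredictionParams
    )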
+# + + +from .types.image_classification import ImageClassificationPredictionParams +from .types.image_object_detection import ImageObjectDetectionPredictionParams +from .types.image_segmentation import ImageSegmentationPredictionParams +from .types.video_action_recognition import VideoActionRecognitionPredictionParams +from .types.video_classification import VideoClassificationPredictionParams +from .types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ( +'ImageClassificationPredictionParams', +'ImageObjectDetectionPredictionParams', +'ImageSegmentationPredictionParams', +'VideoActionRecognitionPredictionParams', +'VideoClassificationPredictionParams', +'VideoObjectTrackingPredictionParams', +) diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..6b925dd9dc --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params", + "schema": "1.0" +} diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py new file mode 100644 index 0000000000..70a92bb59c --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionParams, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from .image_segmentation import ( + ImageSegmentationPredictionParams, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from .video_classification import ( + VideoClassificationPredictionParams, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) + +__all__ = ( + 'ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..67c5453a93 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'ImageClassificationPredictionParams', + }, +) + + +class ImageClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Classification. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is 10. 
+ """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..baed8905ee --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'ImageObjectDetectionPredictionParams', + }, +) + + +class ImageObjectDetectionPredictionParams(proto.Message): + r"""Prediction model parameters for Image Object Detection. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + Note that number of returned predictions is also + limited by metadata's predictionsLimit. Default + value is 10. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..8a5e999504 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'ImageSegmentationPredictionParams', + }, +) + + +class ImageSegmentationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Segmentation. 
+ Attributes: + confidence_threshold (float): + When the model predicts category of pixels of + the image, it will only provide predictions for + pixels that it is at least this much confident + about. All other pixels will be classified as + background. Default value is 0.5. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..37a8c2bc9c --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'VideoActionRecognitionPredictionParams', + }, +) + + +class VideoActionRecognitionPredictionParams(proto.Message): + r"""Prediction model parameters for Video Action Recognition. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..e0cbd81db9 --- /dev/null +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
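Editor's note: the image parameter messages above all follow the same threshold/limit pattern; one hedged usage sketch (values are illustrative only):

    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1

    cls_params = params_v1beta1.ImageClassificationPredictionParams(
        confidence_threshold=0.5,  # predictions scoring below 0.5 are dropped
        max_predictions=5,         # keep at most the top five labels
    )

    seg_params = params_v1beta1.ImageSegmentationPredictionParams(
        confidence_threshold=0.5,  # pixels below this confidence become background
    )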
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.params',
+    manifest={
+        'VideoClassificationPredictionParams',
+    },
+)
+
+
+class VideoClassificationPredictionParams(proto.Message):
+    r"""Prediction model parameters for Video Classification.
+    Attributes:
+        confidence_threshold (float):
+            The Model only returns predictions with at
+            least this confidence score. Default value is
+            0.0
+        max_predictions (int):
+            The Model only returns up to that many top,
+            by confidence score, predictions per instance.
+            If this number is very high, the Model may
+            return fewer predictions. Default value is
+            10,000.
+        segment_classification (bool):
+            Set to true to request segment-level
+            classification. AI Platform returns labels and
+            their confidence scores for the entire time
+            segment of the video that the user specified in
+            the input instance. Default value is true.
+        shot_classification (bool):
+            Set to true to request shot-level
+            classification. AI Platform determines the
+            boundaries for each camera shot in the entire
+            time segment of the video that the user
+            specified in the input instance. AI Platform
+            then returns labels and their confidence scores
+            for each detected shot, along with the start
+            and end time of the shot.
+            WARNING: Model evaluation is not done for this
+            classification type; its quality depends on the
+            training data, but there are no metrics
+            provided to describe that quality.
+            Default value is false.
+        one_sec_interval_classification (bool):
+            Set to true to request classification for a
+            video at one-second intervals. AI Platform
+            returns labels and their confidence scores for
+            each second of the entire time segment of the
+            video that the user specified in the input
+            instance.
+            WARNING: Model evaluation is not done for this
+            classification type; its quality depends on the
+            training data, but there are no metrics
+            provided to describe that quality. Default
+            value is false.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    segment_classification = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+    shot_classification = proto.Field(
+        proto.BOOL,
+        number=4,
+    )
+    one_sec_interval_classification = proto.Field(
+        proto.BOOL,
+        number=5,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py
new file mode 100644
index 0000000000..4e0e97f8d6
--- /dev/null
+++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
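Editor's note: proto-plus messages such as the one above also accept a plain mapping, which is convenient when parameters arrive as decoded JSON; a small sketch:

    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1

    # Equivalent to passing keyword arguments; unset bool fields stay False.
    params = params_v1beta1.VideoClassificationPredictionParams(
        {"confidence_threshold": 0.5, "one_sec_interval_classification": True}
    )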
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.params',
+    manifest={
+        'VideoObjectTrackingPredictionParams',
+    },
+)
+
+
+class VideoObjectTrackingPredictionParams(proto.Message):
+    r"""Prediction model parameters for Video Object Tracking.
+    Attributes:
+        confidence_threshold (float):
+            The Model only returns predictions with at
+            least this confidence score. Default value is
+            0.0
+        max_predictions (int):
+            The model only returns up to that many top,
+            by confidence score, predictions per frame of
+            the video. If this number is very high, the
+            Model may return fewer predictions per frame.
+            Default value is 50.
+        min_bounding_box_size (float):
+            Only bounding boxes with shortest edge at
+            least that long as a relative value of video
+            frame size are returned. Default value is 0.0.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    min_bounding_box_size = proto.Field(
+        proto.FLOAT,
+        number=3,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/schema/predict/params/mypy.ini b/schema/predict/params/mypy.ini
new file mode 100644
index 0000000000..4505b48543
--- /dev/null
+++ b/schema/predict/params/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.6
+namespace_packages = True
diff --git a/schema/predict/params/noxfile.py b/schema/predict/params/noxfile.py
new file mode 100644
index 0000000000..90e4bfa1c9
--- /dev/null
+++ b/schema/predict/params/noxfile.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import pathlib
+import shutil
+import subprocess
+import sys
+
+
+import nox  # type: ignore
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
+PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
+
+
+nox.sessions = [
+    "unit",
+    "cover",
+    "mypy",
+    # Note: the comma after "check_lower_bounds" is required; without it,
+    # implicit string concatenation silently drops "docs" from the defaults.
+    "check_lower_bounds",
+    # exclude update_lower_bounds from default
+    "docs",
+]
+
+@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
+def unit(session):
+    """Run the unit test suite."""
+
+    session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
+    session.install('-e', '.')
+
+    session.run(
+        'py.test',
+        '--quiet',
+        '--cov=google/cloud/aiplatform/v1/schema/predict/params_v1/',
+        '--cov-config=.coveragerc',
+        '--cov-report=term',
+        '--cov-report=html',
+        os.path.join('tests', 'unit', ''.join(session.posargs))
+    )
+
+
+@nox.session(python='3.7')
+def cover(session):
+    """Run the final coverage report.
+    This outputs the coverage report aggregating coverage from the unit
+    test runs (not system test runs), and then erases coverage data.
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/schema/predict/params/scripts/fixup_params_v1_keywords.py b/schema/predict/params/scripts/fixup_params_v1_keywords.py new file mode 100644 index 0000000000..17915abed8 --- /dev/null +++ b/schema/predict/params/scripts/fixup_params_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class paramsCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. 
+        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
+        if any(k.keyword.value == "request" for k in kwargs):
+            # We've already fixed this file, don't fix it again.
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=paramsCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the params client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool operates at a best-effort level at converting positional
+      parameters in client method calls to keyword based parameters.
+      Cases where it WILL FAIL include
+      A) * or ** expansion in a method call.
+      B) Calls via function or method alias (includes free function calls)
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+      These all constitute false negatives. The tool will also detect false
+      positives when an API method shares a name with another method.
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py b/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py new file mode 100644 index 0000000000..17915abed8 --- /dev/null +++ b/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class paramsCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=paramsCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the params client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool operates at a best-effort level at converting positional
+      parameters in client method calls to keyword based parameters.
+      Cases where it WILL FAIL include
+      A) * or ** expansion in a method call.
+      B) Calls via function or method alias (includes free function calls)
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+      These all constitute false negatives. The tool will also detect false
+      positives when an API method shares a name with another method.
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/schema/predict/params/setup.py b/schema/predict/params/setup.py new file mode 100644 index 0000000000..befe8ab40c --- /dev/null +++ b/schema/predict/params/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1-schema-predict-params', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.predict'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.22.2, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/schema/predict/params/tests/__init__.py b/schema/predict/params/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/params/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
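Editor's note: both fixup scripts in this change ship with an empty METHOD_TO_PARAMS table, so for these schema-only packages the transformer rewrites nothing. For an API package with RPCs, the intended effect is the following kind of rewrite (hypothetical client method and field names, for illustration only):

    # Before: flattened positional arguments (old generated surface).
    client.predict("projects/p/locations/l/endpoints/e", instances, timeout=30)

    # After: a single `request` mapping, with control params left as keywords.
    client.predict(
        request={
            "endpoint": "projects/p/locations/l/endpoints/e",
            "instances": instances,
        },
        timeout=30,
    )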
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/params/tests/unit/__init__.py b/schema/predict/params/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/params/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/params/tests/unit/gapic/__init__.py b/schema/predict/params/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/params/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/params/tests/unit/gapic/params_v1/__init__.py b/schema/predict/params/tests/unit/gapic/params_v1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/params/tests/unit/gapic/params_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py b/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/schema/predict/prediction/MANIFEST.in b/schema/predict/prediction/MANIFEST.in
new file mode 100644
index 0000000000..4cca31bd6b
--- /dev/null
+++ b/schema/predict/prediction/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include google/cloud/aiplatform/v1/schema/predict/prediction *.py
+recursive-include google/cloud/aiplatform/v1/schema/predict/prediction_v1 *.py
diff --git a/schema/predict/prediction/README.rst b/schema/predict/prediction/README.rst
new file mode 100644
index 0000000000..acb7f67d13
--- /dev/null
+++ b/schema/predict/prediction/README.rst
@@ -0,0 +1,49 @@
+Python Client for Google Cloud Aiplatform V1 Schema Predict Prediction API
+===========================================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Aiplatform V1 Schema Predict Prediction API.
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/schema/predict/prediction/docs/conf.py b/schema/predict/prediction/docs/conf.py
new file mode 100644
index 0000000000..c0f73900a9
--- /dev/null
+++ b/schema/predict/prediction/docs/conf.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# google-cloud-aiplatform-v1-schema-predict-prediction documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+__version__ = "0.1.0"
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.6.3"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.todo",
+    "sphinx.ext.viewcode",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_flags = ["members"]
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# Allow markdown includes (so releases.md can include CHANGELOG.md)
+# http://www.sphinx-doc.org/en/master/markdown.html
+source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = u"google-cloud-aiplatform-v1-schema-predict-prediction"
+copyright = u"2020, Google, LLC"
+author = u"Google APIs"  # TODO: autogenerate this bit
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Aiplatform V1 Schema Predict Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. 
+# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-aiplatform-v1-schema-predict-prediction-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-prediction.tex", + u"google-cloud-aiplatform-v1-schema-predict-prediction Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-prediction", + u"Google Cloud Aiplatform V1 Schema Predict Prediction Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. 
+# List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (
+        master_doc,
+        "google-cloud-aiplatform-v1-schema-predict-prediction",
+        u"google-cloud-aiplatform-v1-schema-predict-prediction Documentation",
+        author,
+        "google-cloud-aiplatform-v1-schema-predict-prediction",
+        "GAPIC library for Google Cloud Aiplatform V1 Schema Predict Prediction API",
+        "APIs",
+    )
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+    "python": ("http://python.readthedocs.org/en/latest/", None),
+    "gax": ("https://gax-python.readthedocs.org/en/latest/", None),
+    "google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
+    "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
+    "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
+    "grpc": ("https://grpc.io/grpc/python/", None),
+    "requests": ("http://requests.kennethreitz.org/en/stable/", None),
+    "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None),
+    "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
+}
+
+
+# Napoleon settings
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True
+napoleon_use_admonition_for_examples = False
+napoleon_use_admonition_for_notes = False
+napoleon_use_admonition_for_references = False
+napoleon_use_ivar = False
+napoleon_use_param = True
+napoleon_use_rtype = True
diff --git a/schema/predict/prediction/docs/index.rst b/schema/predict/prediction/docs/index.rst
new file mode 100644
index 0000000000..f28df12991
--- /dev/null
+++ b/schema/predict/prediction/docs/index.rst
@@ -0,0 +1,7 @@
+API Reference
+-------------
+.. toctree::
+    :maxdepth: 2
+
+    prediction_v1/services
+    prediction_v1/types
diff --git a/schema/predict/prediction/docs/prediction_v1/services.rst b/schema/predict/prediction/docs/prediction_v1/services.rst
new file mode 100644
index 0000000000..ad6f034387
--- /dev/null
+++ b/schema/predict/prediction/docs/prediction_v1/services.rst
@@ -0,0 +1,4 @@
+Services for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API
+========================================================================
+.. toctree::
+    :maxdepth: 2
diff --git a/schema/predict/prediction/docs/prediction_v1/types.rst b/schema/predict/prediction/docs/prediction_v1/types.rst
new file mode 100644
index 0000000000..a97faf34de
--- /dev/null
+++ b/schema/predict/prediction/docs/prediction_v1/types.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API
+=====================================================================
+
+.. automodule:: google.cloud.aiplatform.v1.schema.predict.prediction_v1.types
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/schema/predict/prediction/docs/prediction_v1beta1/services.rst b/schema/predict/prediction/docs/prediction_v1beta1/services.rst
new file mode 100644
index 0000000000..6de5e17520
--- /dev/null
+++ b/schema/predict/prediction/docs/prediction_v1beta1/services.rst
@@ -0,0 +1,4 @@
+Services for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API
+===================================================================================
+.. toctree::
+    :maxdepth: 2
diff --git a/schema/predict/prediction/docs/prediction_v1beta1/types.rst b/schema/predict/prediction/docs/prediction_v1beta1/types.rst
new file mode 100644
index 0000000000..b14182d6d7
--- /dev/null
+++ b/schema/predict/prediction/docs/prediction_v1beta1/types.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API
+===============================================================================
+
+.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py
new file mode 100644
index 0000000000..27d9f97862
--- /dev/null
+++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + + +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.classification import ClassificationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_object_detection import ImageObjectDetectionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_segmentation import ImageSegmentationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_classification import TabularClassificationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_regression import TabularRegressionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_extraction import TextExtractionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_sentiment import TextSentimentPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_action_recognition import VideoActionRecognitionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_classification import VideoClassificationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ('ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed new file mode 100644 index 0000000000..472fa4d8cc --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py new file mode 100644 index 0000000000..3cf9304526 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
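An editorial note on the pair of ``__init__.py`` modules here: the unversioned ``prediction`` package simply re-exports the versioned ``prediction_v1`` types. A minimal sketch of what that aliasing means in practice, assuming the packages added in this PR are installed:

# Both import paths are expected to resolve to the same generated class,
# since the unversioned package re-exports the prediction_v1 types.
from google.cloud.aiplatform.v1.schema.predict import prediction
from google.cloud.aiplatform.v1.schema.predict import prediction_v1

assert (
    prediction.ClassificationPredictionResult
    is prediction_v1.ClassificationPredictionResult
)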
+# + + +from .types.classification import ClassificationPredictionResult +from .types.image_object_detection import ImageObjectDetectionPredictionResult +from .types.image_segmentation import ImageSegmentationPredictionResult +from .types.tabular_classification import TabularClassificationPredictionResult +from .types.tabular_regression import TabularRegressionPredictionResult +from .types.text_extraction import TextExtractionPredictionResult +from .types.text_sentiment import TextSentimentPredictionResult +from .types.video_action_recognition import VideoActionRecognitionPredictionResult +from .types.video_classification import VideoClassificationPredictionResult +from .types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ( +'ClassificationPredictionResult', +'ImageObjectDetectionPredictionResult', +'ImageSegmentationPredictionResult', +'TabularClassificationPredictionResult', +'TabularRegressionPredictionResult', +'TextExtractionPredictionResult', +'TextSentimentPredictionResult', +'VideoActionRecognitionPredictionResult', +'VideoClassificationPredictionResult', +'VideoObjectTrackingPredictionResult', +) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json new file mode 100644 index 0000000000..ba1d67a00c --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.prediction_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.prediction", + "schema": "1.0" +} diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed new file mode 100644 index 0000000000..472fa4d8cc --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py new file mode 100644 index 0000000000..b7b7c056aa --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .classification import ( + ClassificationPredictionResult, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from .image_segmentation import ( + ImageSegmentationPredictionResult, +) +from .tabular_classification import ( + TabularClassificationPredictionResult, +) +from .tabular_regression import ( + TabularRegressionPredictionResult, +) +from .text_extraction import ( + TextExtractionPredictionResult, +) +from .text_sentiment import ( + TextSentimentPredictionResult, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from .video_classification import ( + VideoClassificationPredictionResult, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) + +__all__ = ( + 'ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py new file mode 100644 index 0000000000..2cc31f3476 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ClassificationPredictionResult', + }, +) + + +class ClassificationPredictionResult(proto.Message): + r"""Prediction output format for Image and Text Classification. 
+ Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py new file mode 100644 index 0000000000..74178c5502 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ImageObjectDetectionPredictionResult', + }, +) + + +class ImageObjectDetectionPredictionResult(proto.Message): + r"""Prediction output format for Image Object Detection. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): + Bounding boxes, i.e. the rectangles over the image, that + pinpoint the found AnnotationSpecs. Given in order that + matches the IDs. Each bounding box is an array of 4 numbers + ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent + the extremal coordinates of the box. They are relative to + the image size, and the point 0,0 is in the top left of the + image. 
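Each entry of the ``bboxes`` field described above is a ``google.protobuf.struct_pb2.ListValue`` holding ``[xMin, xMax, yMin, yMax]``. A minimal sketch of building and reading one box (an editorial illustration, not generated code; assumes the prediction_v1 package is importable):

from google.protobuf import struct_pb2

from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types import image_object_detection

# Coordinates are relative to the image size; (0, 0) is the top-left corner.
box = struct_pb2.ListValue()
box.extend([0.1, 0.4, 0.2, 0.8])  # xMin, xMax, yMin, yMax

pred = image_object_detection.ImageObjectDetectionPredictionResult(
    ids=[3],
    display_names=["cat"],
    confidences=[0.88],
    bboxes=[box],
)
x_min, x_max, y_min, y_max = list(pred.bboxes[0])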
+ """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + bboxes = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=struct_pb2.ListValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py new file mode 100644 index 0000000000..e93991222a --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ImageSegmentationPredictionResult', + }, +) + + +class ImageSegmentationPredictionResult(proto.Message): + r"""Prediction output format for Image Segmentation. + Attributes: + category_mask (str): + A PNG image where each pixel in the mask + represents the category in which the pixel in + the original image was predicted to belong to. + The size of this image will be the same as the + original image. The mapping between the + AnntoationSpec and the color can be found in + model's metadata. The model will choose the most + likely category and if none of the categories + reach the confidence threshold, the pixel will + be marked as background. + confidence_mask (str): + A one channel image which is encoded as an + 8bit lossless PNG. The size of the image will be + the same as the original image. For a specific + pixel, darker color means less confidence in + correctness of the cateogry in the categoryMask + for the corresponding pixel. Black means no + confidence and white means complete confidence. + """ + + category_mask = proto.Field( + proto.STRING, + number=1, + ) + confidence_mask = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py new file mode 100644 index 0000000000..a36bf8f991 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TabularClassificationPredictionResult', + }, +) + + +class TabularClassificationPredictionResult(proto.Message): + r"""Prediction output format for Tabular Classification. + Attributes: + classes (Sequence[str]): + The name of the classes being classified, + contains all possible values of the target + column. + scores (Sequence[float]): + The model's confidence in each class being + correct, higher value means higher confidence. + The N-th score corresponds to the N-th class in + classes. + """ + + classes = proto.RepeatedField( + proto.STRING, + number=1, + ) + scores = proto.RepeatedField( + proto.FLOAT, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py new file mode 100644 index 0000000000..56af2af196 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TabularRegressionPredictionResult', + }, +) + + +class TabularRegressionPredictionResult(proto.Message): + r"""Prediction output format for Tabular Regression. + Attributes: + value (float): + The regression value. + lower_bound (float): + The lower bound of the prediction interval. + upper_bound (float): + The upper bound of the prediction interval. 
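The tabular regression docstring above defines a point estimate bracketed by a prediction interval. A small usage sketch (editorial, not part of the generated diff; assumes the prediction_v1 package is importable):

from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types import tabular_regression

pred = tabular_regression.TabularRegressionPredictionResult(
    value=42.0,
    lower_bound=40.5,
    upper_bound=43.5,
)
# The prediction interval brackets the point estimate.
assert pred.lower_bound <= pred.value <= pred.upper_bound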
+ """ + + value = proto.Field( + proto.FLOAT, + number=1, + ) + lower_bound = proto.Field( + proto.FLOAT, + number=2, + ) + upper_bound = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py new file mode 100644 index 0000000000..3e7398f165 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TextExtractionPredictionResult', + }, +) + + +class TextExtractionPredictionResult(proto.Message): + r"""Prediction output format for Text Extraction. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + text_segment_start_offsets (Sequence[int]): + The start offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + text_segment_end_offsets (Sequence[int]): + The end offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + text_segment_start_offsets = proto.RepeatedField( + proto.INT64, + number=3, + ) + text_segment_end_offsets = proto.RepeatedField( + proto.INT64, + number=4, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py new file mode 100644 index 0000000000..135db45729 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TextSentimentPredictionResult', + }, +) + + +class TextSentimentPredictionResult(proto.Message): + r"""Prediction output format for Text Sentiment. + Attributes: + sentiment (int): + The integer sentiment label, between 0 + (inclusive) and sentimentMax (inclusive), + where 0 maps to the least positive sentiment and + sentimentMax maps to the most positive one. The + higher the score is, the more positive the + sentiment in the text snippet is. Note: + sentimentMax is an integer value between 1 + (inclusive) and 10 (inclusive). + """ + + sentiment = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py new file mode 100644 index 0000000000..5a853655ae --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'VideoActionRecognitionPredictionResult', + }, +) + + +class VideoActionRecognitionPredictionResult(proto.Message): + r"""Prediction output format for Video Action Recognition. + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified.
Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in correctness of this + prediction, higher value means higher + confidence. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=6, + message=wrappers_pb2.FloatValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py new file mode 100644 index 0000000000..da14b3253e --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'VideoClassificationPredictionResult', + }, +) + + +class VideoClassificationPredictionResult(proto.Message): + r"""Prediction output format for Video Classification. + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + type_ (str): + The type of the prediction. The requested + types can be configured via parameters. This + will be one of - segment-classification + - shot-classification + - one-sec-interval-classification + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for the + 'segment-classification' prediction type, this + equals the original 'timeSegmentStart' from the + input instance, for other types it is the start + of a shot or a 1 second interval respectively. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end.
Note that for the + 'segment-classification' prediction type, this + equals the original 'timeSegmentEnd' from the + input instance, for other types it is the end of + a shot or a 1 second interval respectively. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in correctness of this + prediction, higher value means higher + confidence. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + type_ = proto.Field( + proto.STRING, + number=3, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=6, + message=wrappers_pb2.FloatValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py new file mode 100644 index 0000000000..9b70e913cd --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'VideoObjectTrackingPredictionResult', + }, +) + + +class VideoObjectTrackingPredictionResult(proto.Message): + r"""Prediction output format for Video Object Tracking. + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the object instance has been + detected. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, inclusive, of the video's time + segment in which the object instance has been + detected. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in correctness of this + prediction, higher value means higher + confidence.
+ frames (Sequence[google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.VideoObjectTrackingPredictionResult.Frame]): + All of the frames of the video in which a + single object instance has been detected. The + bounding boxes in the frames identify the same + object. + """ + + class Frame(proto.Message): + r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a + bounding box, i.e. the rectangle over the video frame pinpointing + the found AnnotationSpec. The coordinates are relative to the frame + size, and the point 0,0 is in the top left of the frame. + + Attributes: + time_offset (google.protobuf.duration_pb2.Duration): + A time (frame) of a video in which the object + has been detected. Expressed as a number of + seconds as measured from the start of the video, + with fractions up to a microsecond precision, + and with "s" appended at the end. + x_min (google.protobuf.wrappers_pb2.FloatValue): + The leftmost coordinate of the bounding box. + x_max (google.protobuf.wrappers_pb2.FloatValue): + The rightmost coordinate of the bounding box. + y_min (google.protobuf.wrappers_pb2.FloatValue): + The topmost coordinate of the bounding box. + y_max (google.protobuf.wrappers_pb2.FloatValue): + The bottommost coordinate of the bounding + box. + """ + + time_offset = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + x_min = proto.Field( + proto.MESSAGE, + number=2, + message=wrappers_pb2.FloatValue, + ) + x_max = proto.Field( + proto.MESSAGE, + number=3, + message=wrappers_pb2.FloatValue, + ) + y_min = proto.Field( + proto.MESSAGE, + number=4, + message=wrappers_pb2.FloatValue, + ) + y_max = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + frames = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=Frame, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py new file mode 100644 index 0000000000..0b54451ca0 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
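The video result types above use protobuf well-known types for their fields. A usage sketch (editorial, not generated code) under the assumption that proto-plus marshals these to Python natives, i.e. ``Duration`` as ``datetime.timedelta`` and ``FloatValue`` as ``float``:

import datetime

from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types import video_action_recognition

pred = video_action_recognition.VideoActionRecognitionPredictionResult(
    id="123",
    display_name="jump",
    time_segment_start=datetime.timedelta(seconds=1.5),
    time_segment_end=datetime.timedelta(seconds=3),
    confidence=0.8,
)
# Plain timedelta arithmetic on the marshaled Duration fields.
span = pred.time_segment_end - pred.time_segment_start
print(span.total_seconds())  # 1.5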
+# + + +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_segmentation import ImageSegmentationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_classification import TabularClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_regression import TabularRegressionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_extraction import TextExtractionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_sentiment import TextSentimentPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.time_series_forecasting import TimeSeriesForecastingPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_classification import VideoClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ('ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'TimeSeriesForecastingPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py new file mode 100644 index 0000000000..495759c24b --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from .types.classification import ClassificationPredictionResult +from .types.image_object_detection import ImageObjectDetectionPredictionResult +from .types.image_segmentation import ImageSegmentationPredictionResult +from .types.tabular_classification import TabularClassificationPredictionResult +from .types.tabular_regression import TabularRegressionPredictionResult +from .types.text_extraction import TextExtractionPredictionResult +from .types.text_sentiment import TextSentimentPredictionResult +from .types.time_series_forecasting import TimeSeriesForecastingPredictionResult +from .types.video_action_recognition import VideoActionRecognitionPredictionResult +from .types.video_classification import VideoClassificationPredictionResult +from .types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ( +'ClassificationPredictionResult', +'ImageObjectDetectionPredictionResult', +'ImageSegmentationPredictionResult', +'TabularClassificationPredictionResult', +'TabularRegressionPredictionResult', +'TextExtractionPredictionResult', +'TextSentimentPredictionResult', +'TimeSeriesForecastingPredictionResult', +'VideoActionRecognitionPredictionResult', +'VideoClassificationPredictionResult', +'VideoObjectTrackingPredictionResult', +) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..99d3dc6402 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction", + "schema": "1.0" +} diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py new file mode 100644 index 0000000000..f3b70f66dd --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .classification import ( + ClassificationPredictionResult, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from .image_segmentation import ( + ImageSegmentationPredictionResult, +) +from .tabular_classification import ( + TabularClassificationPredictionResult, +) +from .tabular_regression import ( + TabularRegressionPredictionResult, +) +from .text_extraction import ( + TextExtractionPredictionResult, +) +from .text_sentiment import ( + TextSentimentPredictionResult, +) +from .time_series_forecasting import ( + TimeSeriesForecastingPredictionResult, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from .video_classification import ( + VideoClassificationPredictionResult, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) + +__all__ = ( + 'ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'TimeSeriesForecastingPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py new file mode 100644 index 0000000000..858691c322 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'ClassificationPredictionResult', + }, +) + + +class ClassificationPredictionResult(proto.Message): + r"""Prediction output format for Image and Text Classification. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..d787871e99 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'ImageObjectDetectionPredictionResult', + }, +) + + +class ImageObjectDetectionPredictionResult(proto.Message): + r"""Prediction output format for Image Object Detection. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): + Bounding boxes, i.e. the rectangles over the image, that + pinpoint the found AnnotationSpecs. Given in order that + matches the IDs. Each bounding box is an array of 4 numbers + ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent + the extremal coordinates of the box. They are relative to + the image size, and the point 0,0 is in the top left of the + image. 
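The classification and object-detection results above report parallel repeated fields that line up by index. A minimal sketch of consuming them together (editorial; assumes the generated v1beta1 package is importable):

from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types import classification

result = classification.ClassificationPredictionResult(
    ids=[1, 2],
    display_names=["daisy", "tulip"],
    confidences=[0.92, 0.08],
)
# The N-th id, display name, and confidence all describe the same prediction.
for id_, name, confidence in zip(result.ids, result.display_names, result.confidences):
    print(f"{id_} {name}: {confidence:.2f}")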
+ """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + bboxes = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=struct_pb2.ListValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..92cc20720c --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'ImageSegmentationPredictionResult', + }, +) + + +class ImageSegmentationPredictionResult(proto.Message): + r"""Prediction output format for Image Segmentation. + Attributes: + category_mask (str): + A PNG image where each pixel in the mask + represents the category in which the pixel in + the original image was predicted to belong to. + The size of this image will be the same as the + original image. The mapping between the + AnntoationSpec and the color can be found in + model's metadata. The model will choose the most + likely category and if none of the categories + reach the confidence threshold, the pixel will + be marked as background. + confidence_mask (str): + A one channel image which is encoded as an + 8bit lossless PNG. The size of the image will be + the same as the original image. For a specific + pixel, darker color means less confidence in + correctness of the cateogry in the categoryMask + for the corresponding pixel. Black means no + confidence and white means complete confidence. + """ + + category_mask = proto.Field( + proto.STRING, + number=1, + ) + confidence_mask = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py new file mode 100644 index 0000000000..8a437022fd --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TabularClassificationPredictionResult', + }, +) + + +class TabularClassificationPredictionResult(proto.Message): + r"""Prediction output format for Tabular Classification. + Attributes: + classes (Sequence[str]): + The name of the classes being classified, + contains all possible values of the target + column. + scores (Sequence[float]): + The model's confidence in each class being + correct, higher value means higher confidence. + The N-th score corresponds to the N-th class in + classes. + """ + + classes = proto.RepeatedField( + proto.STRING, + number=1, + ) + scores = proto.RepeatedField( + proto.FLOAT, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py new file mode 100644 index 0000000000..a49f6f55ce --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TabularRegressionPredictionResult', + }, +) + + +class TabularRegressionPredictionResult(proto.Message): + r"""Prediction output format for Tabular Regression. + Attributes: + value (float): + The regression value. + lower_bound (float): + The lower bound of the prediction interval. + upper_bound (float): + The upper bound of the prediction interval. 
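For the tabular classification type above, the N-th score corresponds to the N-th class, so the top prediction is an argmax over ``scores``. Sketch (editorial, not part of the diff):

from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types import tabular_classification

pred = tabular_classification.TabularClassificationPredictionResult(
    classes=["low", "medium", "high"],
    scores=[0.1, 0.3, 0.6],
)
# Pick the class whose paired score is largest.
best = max(range(len(pred.scores)), key=lambda i: pred.scores[i])
print(pred.classes[best])  # -> "high"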
+ """ + + value = proto.Field( + proto.FLOAT, + number=1, + ) + lower_bound = proto.Field( + proto.FLOAT, + number=2, + ) + upper_bound = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..a92d9caefa --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TextExtractionPredictionResult', + }, +) + + +class TextExtractionPredictionResult(proto.Message): + r"""Prediction output format for Text Extraction. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + text_segment_start_offsets (Sequence[int]): + The start offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + text_segment_end_offsets (Sequence[int]): + The end offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
+ """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + text_segment_start_offsets = proto.RepeatedField( + proto.INT64, + number=3, + ) + text_segment_end_offsets = proto.RepeatedField( + proto.INT64, + number=4, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py new file mode 100644 index 0000000000..4967b02aae --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TextSentimentPredictionResult', + }, +) + + +class TextSentimentPredictionResult(proto.Message): + r"""Prediction output format for Text Sentiment + Attributes: + sentiment (int): + The integer sentiment labels between 0 + (inclusive) and sentimentMax label (inclusive), + while 0 maps to the least positive sentiment and + sentimentMax maps to the most positive one. The + higher the score is, the more positive the + sentiment in the text snippet is. Note: + sentimentMax is an integer value between 1 + (inclusive) and 10 (inclusive). + """ + + sentiment = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py new file mode 100644 index 0000000000..67a3cd9dff --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TimeSeriesForecastingPredictionResult', + }, +) + + +class TimeSeriesForecastingPredictionResult(proto.Message): + r"""Prediction output format for Time Series Forecasting. + Attributes: + value (float): + The regression value. + """ + + value = proto.Field( + proto.FLOAT, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..bc53328da4 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto  # type: ignore + +from google.protobuf import duration_pb2  # type: ignore +from google.protobuf import wrappers_pb2  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'VideoActionRecognitionPredictionResult', + }, +) + + +class VideoActionRecognitionPredictionResult(proto.Message): + r"""Prediction output format for Video Action Recognition. + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in the correctness of + this prediction; a higher value means higher + confidence.
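Note that the video result types use google.protobuf Duration and wrapper messages rather than plain scalars; FloatValue in particular lets an unset confidence be distinguished from a literal 0.0. A construction sketch with invented values:

from google.protobuf import duration_pb2
from google.protobuf import wrappers_pb2

from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types import video_action_recognition

result = video_action_recognition.VideoActionRecognitionPredictionResult(
    id='12345',
    display_name='jumping',
    time_segment_start=duration_pb2.Duration(seconds=12, nanos=500000000),  # "12.5s"
    time_segment_end=duration_pb2.Duration(seconds=14),
    confidence=wrappers_pb2.FloatValue(value=0.87),
)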
+ """ + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=6, + message=wrappers_pb2.FloatValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..95439add5e --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'VideoClassificationPredictionResult', + }, +) + + +class VideoClassificationPredictionResult(proto.Message): + r"""Prediction output format for Video Classification. + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + type_ (str): + The type of the prediction. The requested + types can be configured via parameters. This + will be one of - segment-classification + - shot-classification + - one-sec-interval-classification + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentStart' from the + input instance, for other types it is the start + of a shot or a 1 second interval respectively. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentEnd' from the + input instance, for other types it is the end of + a shot or a 1 second interval respectively. 
+ confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in the correctness of + this prediction; a higher value means higher + confidence. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + type_ = proto.Field( + proto.STRING, + number=3, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=6, + message=wrappers_pb2.FloatValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..34cf7ab1b9 --- /dev/null +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto  # type: ignore + +from google.protobuf import duration_pb2  # type: ignore +from google.protobuf import wrappers_pb2  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'VideoObjectTrackingPredictionResult', + }, +) + +
class VideoObjectTrackingPredictionResult(proto.Message): + r"""Prediction output format for Video Object Tracking. + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the object instance has been + detected. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, inclusive, of the video's time + segment in which the object instance has been + detected. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in the correctness of + this prediction; a higher value means higher + confidence. + frames (Sequence[google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.VideoObjectTrackingPredictionResult.Frame]): + All of the frames of the video in which a + single object instance has been detected. The + bounding boxes in the frames identify the same + object.
+ """ + + class Frame(proto.Message): + r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a + bounding box, i.e. the rectangle over the video frame pinpointing + the found AnnotationSpec. The coordinates are relative to the frame + size, and the point 0,0 is in the top left of the frame. + + Attributes: + time_offset (google.protobuf.duration_pb2.Duration): + A time (frame) of a video in which the object + has been detected. Expressed as a number of + seconds as measured from the start of the video, + with fractions up to a microsecond precision, + and with "s" appended at the end. + x_min (google.protobuf.wrappers_pb2.FloatValue): + The leftmost coordinate of the bounding box. + x_max (google.protobuf.wrappers_pb2.FloatValue): + The rightmost coordinate of the bounding box. + y_min (google.protobuf.wrappers_pb2.FloatValue): + The topmost coordinate of the bounding box. + y_max (google.protobuf.wrappers_pb2.FloatValue): + The bottommost coordinate of the bounding + box. + """ + + time_offset = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + x_min = proto.Field( + proto.MESSAGE, + number=2, + message=wrappers_pb2.FloatValue, + ) + x_max = proto.Field( + proto.MESSAGE, + number=3, + message=wrappers_pb2.FloatValue, + ) + y_min = proto.Field( + proto.MESSAGE, + number=4, + message=wrappers_pb2.FloatValue, + ) + y_max = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + frames = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=Frame, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/schema/predict/prediction/mypy.ini b/schema/predict/prediction/mypy.ini new file mode 100644 index 0000000000..4505b48543 --- /dev/null +++ b/schema/predict/prediction/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/schema/predict/prediction/noxfile.py b/schema/predict/prediction/noxfile.py new file mode 100644 index 0000000000..dba807db56 --- /dev/null +++ b/schema/predict/prediction/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox  # type: ignore + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + + +nox.options.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds", + # exclude update_lower_bounds from default + "docs", +] + +@nox.session(python=['3.6', '3.7', '3.8', '3.9']) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/aiplatform/v1/schema/predict/prediction_v1/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python='3.7') +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W",  # warnings as errors + "-T",  # show full traceback on exception + "-N",  # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/schema/predict/prediction/scripts/fixup_prediction_v1_keywords.py b/schema/predict/prediction/scripts/fixup_prediction_v1_keywords.py new file mode 100644 index 0000000000..7860bb63cf --- /dev/null +++ b/schema/predict/prediction/scripts/fixup_prediction_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
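For orientation, the sessions in the generated noxfile above are run through nox's command line; hypothetical invocations, assuming nox is installed (parametrized sessions take a -<version> suffix):

# nox -s unit-3.8             # run the unit tests on Python 3.8
# nox -s mypy-3.7             # type-check the package
# nox -s docs                 # build the Sphinx docs
# nox -s update_lower_bounds  # refresh constraints.txt (excluded from the defaults)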
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class predictionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=predictionCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the prediction client library.
+ +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level when converting positional + parameters in client method calls to keyword-based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool may also produce false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py b/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py new file mode 100644 index 0000000000..7860bb63cf --- /dev/null +++ b/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class predictionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters.
+ args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=predictionCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the prediction client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level when converting positional + parameters in client method calls to keyword-based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool may also produce false + positives when an API method shares a name with another method.
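To make the transformation concrete: with a populated METHOD_TO_PARAMS (it is empty in both generated copies here, so leave_Call currently returns every call unchanged), the transformer folds positional arguments into a single request dict while the retry/timeout/metadata control parameters stay as keywords. A hedged before/after sketch, assuming an invented entry 'predict': ('endpoint', 'instances'):

# Before fix-up: flattened positional call.
client.predict(endpoint, instances, retry=retry)
# After fix-up: arguments folded into a request dict; control params untouched.
client.predict(request={'endpoint': endpoint, 'instances': instances}, retry=retry)

The script itself would be run as, e.g., python fixup_prediction_v1beta1_keywords.py --input-directory src/ --output-directory fixed/ (directory names invented); per the checks below, both directories must already exist and the output directory must be empty.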
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/schema/predict/prediction/setup.py b/schema/predict/prediction/setup.py new file mode 100644 index 0000000000..99756ad5a4 --- /dev/null +++ b/schema/predict/prediction/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1-schema-predict-prediction', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.predict'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.22.2, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/schema/predict/prediction/tests/__init__.py b/schema/predict/prediction/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/prediction/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/prediction/tests/unit/__init__.py b/schema/predict/prediction/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/prediction/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/prediction/tests/unit/gapic/__init__.py b/schema/predict/prediction/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/prediction/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/schema/predict/prediction/tests/unit/gapic/prediction_v1/__init__.py b/schema/predict/prediction/tests/unit/gapic/prediction_v1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/prediction/tests/unit/gapic/prediction_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py b/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/synth.metadata b/synth.metadata deleted file mode 100644 index 5167eaca25..0000000000 --- a/synth.metadata +++ /dev/null @@ -1,53 +0,0 @@ -{ - "sources": [ - { - "git": { - "name": ".", - "remote": "https://github.com/googleapis/python-aiplatform.git", - "sha": "fd36abe48afa3bd4d95f152b97a65613cc2ff23c" - } - }, - { - "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "7f6e0d54743dcb86c618b2f78aac2d51e02834b5", - "internalRef": "355883280" - } - }, - { - "git": { - "name": "synthtool", - "remote": "https://github.com/googleapis/synthtool.git", - "sha": "692715c0f23a7bb3bfbbaa300f7620ddfa8c47e5" - } - }, - { - "git": { - "name": "synthtool", - "remote": "https://github.com/googleapis/synthtool.git", - "sha": "692715c0f23a7bb3bfbbaa300f7620ddfa8c47e5" - } - } - ], - "destinations": [ - { - "client": { - "source": "googleapis", - "apiName": "aiplatform", - "apiVersion": "v1beta1", - "language": "python", - "generator": "bazel" - } - }, - { - "client": { - "source": "googleapis", - "apiName": "aiplatform", - "apiVersion": "v1", - "language": "python", - "generator": "bazel" - } - } - ] -} \ No newline at end of file diff --git a/synth.py b/synth.py deleted file mode 100644 index c60f1b9319..0000000000 --- a/synth.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""This script is used to synthesize generated parts of this library.""" - -import os - -import synthtool as s -import synthtool.gcp as gcp -from synthtool.languages import python - -gapic = gcp.GAPICBazel() - -common = gcp.CommonTemplates() - -# ---------------------------------------------------------------------------- -# Generate AI Platform GAPIC layer -# ---------------------------------------------------------------------------- - - -versions = ["v1beta1", "v1"] - -for version in versions: - library = gapic.py_library( - service="aiplatform", - version=version, - bazel_target=f"//google/cloud/aiplatform/{version}:aiplatform-{version}-py", - ) - - s.move( - library, - excludes=[ - ".pre-commit-config.yaml", - "setup.py", - "README.rst", - "docs/index.rst", - f"docs/definition_{version}/services.rst", - f"docs/instance_{version}/services.rst", - f"docs/params_{version}/services.rst", - f"docs/prediction_{version}/services.rst", - f"scripts/fixup_aiplatform_{version}_keywords.py", - f"scripts/fixup_definition_{version}_keywords.py", - f"scripts/fixup_instance_{version}_keywords.py", - f"scripts/fixup_params_{version}_keywords.py", - f"scripts/fixup_prediction_{version}_keywords.py", - "google/cloud/aiplatform/__init__.py", - f"google/cloud/aiplatform/{version}/schema/**/services/", - f"tests/unit/gapic/definition_{version}/", - f"tests/unit/gapic/instance_{version}/", - f"tests/unit/gapic/params_{version}/", - f"tests/unit/gapic/prediction_{version}/", - ], - ) - - # --------------------------------------------------------------------- - # Patch each version of the library - # --------------------------------------------------------------------- - - # https://github.com/googleapis/gapic-generator-python/issues/413 - s.replace( - f"google/cloud/aiplatform_{version}/services/prediction_service/client.py", - "request.instances = instances", - "request.instances.extend(instances)", - ) - - # https://github.com/googleapis/gapic-generator-python/issues/672 - s.replace( - "google/cloud/aiplatform_{version}/services/endpoint_service/client.py", - "request.traffic_split.extend\(traffic_split\)", - "request.traffic_split = traffic_split", - ) - -# ---------------------------------------------------------------------------- -# Patch the library -# ---------------------------------------------------------------------------- - -# Fix assert with endpoint missing port -# https://github.com/googleapis/gapic-generator-python/issues/872 -s.replace( - "tests/unit/gapic/**/*.py", - '''create_channel\.assert_called_with\( -(\s+)"aiplatform\.googleapis\.com",''', - '''create_channel.assert_called_with( -\g<1>"aiplatform.googleapis.com:443",''' -) - -# Patch broken assert -# https://github.com/googleapis/gapic-generator-python/issues/414 -s.replace( - "tests/unit/gapic/**/test_prediction_service.py", - """assert args\[0\]\.parameters == struct_pb2\.Value\(null_value=struct_pb2\.NullValue\.NULL_VALUE\)""", - """# https://github.com/googleapis/gapic-generator-python/issues/414 - # assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)""" -) - -# Generator adds a bad import statement to enhanced type; -# need to fix in post-processing steps. 
-#s.replace( -# "google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py", -# "text_sentiment_pb2 as gcaspi_text_sentiment # type: ignore", -# "TextSentimentPredictionInstance") - -#s.replace( -# "google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py", -# "message=gcaspi_text_sentiment.TextSentimentPredictionInstance,", -# "message=TextSentimentPredictionInstance,") - - - -# ---------------------------------------------------------------------------- -# Add templated files -# ---------------------------------------------------------------------------- - -templated_files = common.py_library(cov_level=99, microgenerator=True) -s.move( - templated_files, - excludes=[ - ".coveragerc", - ".kokoro/samples/**" - ] -) # the microgenerator has a good coveragerc file - -# Don't treat docs warnings as errors -s.replace("noxfile.py", """["']-W["'], # warnings as errors""", "") - -s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/testing/constraints-3.6.txt b/testing/constraints-3.6.txt index 6753ac710d..9631174f70 100644 --- a/testing/constraints-3.6.txt +++ b/testing/constraints-3.6.txt @@ -12,3 +12,4 @@ mock==4.0.2 google-cloud-storage==1.32.0 google-auth==1.25.0 # TODO: Remove when google-api-core >= 1.26.0 is required packaging==14.3 +grpcio-testing==1.34.0 \ No newline at end of file diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index d4d1abdd46..5645588f27 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -1732,18 +1732,20 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", - "dataset": "nautilus", + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", } path = MigrationServiceClient.dataset_path(**expected) @@ -1753,9 +1755,9 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "scallop" - location = "abalone" - dataset = "squid" + project = "squid" + location = "clam" + dataset = "whelk" expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @@ -1765,9 +1767,9 @@ def test_dataset_path(): def test_parse_dataset_path(): expected = { - "project": "clam", - "location": "whelk", - "dataset": "octopus", + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", } path = MigrationServiceClient.dataset_path(**expected) @@ -1777,20 +1779,18 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = 
MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "mussel", - "location": "winkle", + "project": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected)
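The migration-service test churn above boils down to MigrationServiceClient.dataset_path gaining a location segment, with the duplicated test definitions reordered so the location-bearing pattern is exercised first. An illustrative round trip reusing the tests' placeholder values (a sketch, assuming the standard GAPIC parse_* helper):

path = MigrationServiceClient.dataset_path('cuttlefish', 'mussel', 'winkle')
# -> 'projects/cuttlefish/locations/mussel/datasets/winkle'
assert MigrationServiceClient.parse_dataset_path(path) == {
    'project': 'cuttlefish', 'location': 'mussel', 'dataset': 'winkle',
}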