diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py index 4c2154dd90..fc5c3b587a 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -26,8 +26,8 @@ class ImageClassificationPredictionInstance(proto.Message): r"""Prediction input format for Image Classification. Attributes: content (str): - The image bytes or GCS URI to make the - prediction on. + The image bytes or Cloud Storage URI to make + the prediction on. mime_type (str): The MIME type of the content of the image. Only the images in below listed MIME types are diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py index d7b41623aa..e0293472e7 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -26,8 +26,8 @@ class ImageObjectDetectionPredictionInstance(proto.Message): r"""Prediction input format for Image Object Detection. Attributes: content (str): - The image bytes or GCS URI to make the - prediction on. + The image bytes or Cloud Storage URI to make + the prediction on. mime_type (str): The MIME type of the content of the image. 
Only the images in below listed MIME types are diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py index 9c393faa73..e3f7723171 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -36,7 +36,7 @@ class TextExtractionPredictionInstance(proto.Message): If a key is provided, the batch prediction result will by mapped to this key. If omitted, then the batch prediction result will contain - the entire input instance. AI Platform will not + the entire input instance. Vertex AI will not check if keys in the request are duplicates, so it is up to the caller to ensure the keys are unique. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py index 35ad2ca0ee..dd49c20661 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py @@ -37,16 +37,16 @@ class VideoClassificationPredictionParams(proto.Message): 10,000. segment_classification (bool): Set to true to request segment-level - classification. AI Platform returns labels and + classification. Vertex AI returns labels and their confidence scores for the entire time segment of the video that user specified in the input instance. Default value is true shot_classification (bool): Set to true to request shot-level - classification. AI Platform determines the + classification. Vertex AI determines the boundaries for each camera shot in the entire time segment of the video that user specified in - the input instance. AI Platform then returns + the input instance. 
Vertex AI then returns labels and their confidence scores for each detected shot, along with the start and end time of the shot. @@ -57,15 +57,14 @@ class VideoClassificationPredictionParams(proto.Message): Default value is false one_sec_interval_classification (bool): Set to true to request classification for a - video at one-second intervals. AI Platform - returns labels and their confidence scores for - each second of the entire time segment of the - video that user specified in the input WARNING: - Model evaluation is not done for this - classification type, the quality of it depends - on the training data, but there are no metrics - provided to describe that quality. Default value - is false + video at one-second intervals. Vertex AI returns + labels and their confidence scores for each + second of the entire time segment of the video + that user specified in the input WARNING: Model + evaluation is not done for this classification + type, the quality of it depends on the training + data, but there are no metrics provided to + describe that quality. Default value is false """ confidence_threshold = proto.Field(proto.FLOAT, number=1,) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py index d37236a5cc..6f70df673f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -27,8 +27,7 @@ class ClassificationPredictionResult(proto.Message): Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. + had been identified. display_names (Sequence[str]): The display names of the AnnotationSpecs that had been identified, order matches the IDs. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py index 3531ec74f6..a9650f92c6 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py @@ -139,6 +139,9 @@ class AutoMlTablesInputs(proto.Message): predictions to a BigQuery table. If this configuration is absent, then the export is not performed. + additional_experiments (Sequence[str]): + Additional experiment flags for the Tables + training pipeline. """ class Transformation(proto.Message): @@ -401,6 +404,7 @@ class TextArrayTransformation(proto.Message): number=10, message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, ) + additional_experiments = proto.RepeatedField(proto.STRING, number=11,) class AutoMlTablesMetadata(proto.Message): diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py index 1d3f8d0e3f..7c586c3fd5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py @@ -167,6 +167,9 @@ class AutoMlForecastingInputs(proto.Message): - "ignore-validation" - ignore the results of the validation and continue + additional_experiments (Sequence[str]): + Additional experiment flags for the time + series forecasting training. 
""" class Transformation(proto.Message): @@ -376,6 +379,7 @@ class Granularity(proto.Message): ) quantiles = proto.RepeatedField(proto.DOUBLE, number=16,) validation_options = proto.Field(proto.STRING, number=17,) + additional_experiments = proto.RepeatedField(proto.STRING, number=25,) class AutoMlForecastingMetadata(proto.Message): diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py index 4132a92bdc..9404e18964 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py @@ -48,6 +48,8 @@ class ModelType(proto.Enum): MODEL_TYPE_UNSPECIFIED = 0 CLOUD = 1 MOBILE_VERSATILE_1 = 2 + MOBILE_JETSON_VERSATILE_1 = 3 + MOBILE_CORAL_VERSATILE_1 = 4 model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py index 15046f72c1..861a4b3e8a 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py @@ -33,7 +33,6 @@ class ExportEvaluatedDataItemsConfig(proto.Message): If not specified, then results are exported to the following auto-created BigQuery table: - :export_evaluated_examples__.evaluated_examples override_existing_table (bool): If true and an export destination is diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py 
b/google/cloud/aiplatform_v1/services/migration_service/client.py index fb8d9a9a4d..162818246f 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -195,32 +195,32 @@ def parse_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) -> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", - path, - ) + m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index 4736076c68..47d85e73f2 
100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -179,19 +179,16 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", - path, - ) + m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) return m.groupdict() if m else {} @staticmethod @@ -211,16 +208,19 @@ def parse_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) -> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py 
b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py index c85d4a96cd..aac9e2bc91 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -28,8 +28,8 @@ class ImageClassificationPredictionInstance(proto.Message): r"""Prediction input format for Image Classification. Attributes: content (str): - The image bytes or GCS URI to make the - prediction on. + The image bytes or Cloud Storage URI to make + the prediction on. mime_type (str): The MIME type of the content of the image. Only the images in below listed MIME types are diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py index d9895e3372..80a5d797a1 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -28,8 +28,8 @@ class ImageObjectDetectionPredictionInstance(proto.Message): r"""Prediction input format for Image Object Detection. Attributes: content (str): - The image bytes or GCS URI to make the - prediction on. + The image bytes or Cloud Storage URI to make + the prediction on. mime_type (str): The MIME type of the content of the image. 
Only the images in below listed MIME types are diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py index 0b1304d1c3..8f47b27080 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -38,7 +38,7 @@ class TextExtractionPredictionInstance(proto.Message): If a key is provided, the batch prediction result will by mapped to this key. If omitted, then the batch prediction result will contain - the entire input instance. AI Platform will not + the entire input instance. Vertex AI will not check if keys in the request are duplicates, so it is up to the caller to ensure the keys are unique. diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py index e0cbd81db9..ff7bbd1b86 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py @@ -39,16 +39,16 @@ class VideoClassificationPredictionParams(proto.Message): 10,000. segment_classification (bool): Set to true to request segment-level - classification. AI Platform returns labels and + classification. Vertex AI returns labels and their confidence scores for the entire time segment of the video that user specified in the input instance. Default value is true shot_classification (bool): Set to true to request shot-level - classification. AI Platform determines the + classification. 
Vertex AI determines the boundaries for each camera shot in the entire time segment of the video that user specified in - the input instance. AI Platform then returns + the input instance. Vertex AI then returns labels and their confidence scores for each detected shot, along with the start and end time of the shot. @@ -59,15 +59,14 @@ class VideoClassificationPredictionParams(proto.Message): Default value is false one_sec_interval_classification (bool): Set to true to request classification for a - video at one-second intervals. AI Platform - returns labels and their confidence scores for - each second of the entire time segment of the - video that user specified in the input WARNING: - Model evaluation is not done for this - classification type, the quality of it depends - on the training data, but there are no metrics - provided to describe that quality. Default value - is false + video at one-second intervals. Vertex AI returns + labels and their confidence scores for each + second of the entire time segment of the video + that user specified in the input WARNING: Model + evaluation is not done for this classification + type, the quality of it depends on the training + data, but there are no metrics provided to + describe that quality. 
Default value is false """ confidence_threshold = proto.Field( diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py index 858691c322..ecd16c7250 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -29,8 +29,7 @@ class ClassificationPredictionResult(proto.Message): Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. + had been identified. display_names (Sequence[str]): The display names of the AnnotationSpecs that had been identified, order matches the IDs. diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index 1f782b88aa..c60e86af14 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -1700,18 +1700,20 @@ def test_parse_dataset_path(): def test_dataset_path(): project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + location = "clam" + dataset = "whelk" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", - "dataset": "octopus", + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", } path = 
MigrationServiceClient.dataset_path(**expected) @@ -1721,20 +1723,18 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "mussel", - "location": "winkle", + "project": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index 32141aea5e..9a343540aa 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -1678,20 +1678,18 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1701,9 +1699,9 @@ def 
test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - location = "clam" - dataset = "whelk" + project = "scallop" + location = "abalone" + dataset = "squid" expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @@ -1713,9 +1711,9 @@ def test_dataset_path(): def test_parse_dataset_path(): expected = { - "project": "octopus", - "location": "oyster", - "dataset": "nudibranch", + "project": "clam", + "location": "whelk", + "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1725,18 +1723,20 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + project = "oyster" + location = "nudibranch" + dataset = "cuttlefish" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", + "project": "mussel", + "location": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected)