Skip to content

Commit

Permalink
feat: Adds additional_experiments field to AutoMlTablesInputs (#544)
Browse files Browse the repository at this point in the history
PiperOrigin-RevId: 384715948

Source-Link: googleapis/googleapis@83c3728

Source-Link: googleapis/googleapis-gen@4c0c40d

feat: Adds additional_experiments field to AutoMlForecastingInputs 
feat: Adds two new ModelType constants for Video Action Recognition training jobs
  • Loading branch information
gcf-owl-bot[bot] committed Jul 14, 2021
1 parent f3a3d03 commit 8077b3d
Show file tree
Hide file tree
Showing 18 changed files with 100 additions and 95 deletions.
Expand Up @@ -26,8 +26,8 @@ class ImageClassificationPredictionInstance(proto.Message):
r"""Prediction input format for Image Classification.
Attributes:
content (str):
The image bytes or GCS URI to make the
prediction on.
The image bytes or Cloud Storage URI to make
the prediction on.
mime_type (str):
The MIME type of the content of the image.
Only the images in below listed MIME types are
Expand Down
Expand Up @@ -26,8 +26,8 @@ class ImageObjectDetectionPredictionInstance(proto.Message):
r"""Prediction input format for Image Object Detection.
Attributes:
content (str):
The image bytes or GCS URI to make the
prediction on.
The image bytes or Cloud Storage URI to make
the prediction on.
mime_type (str):
The MIME type of the content of the image.
Only the images in below listed MIME types are
Expand Down
Expand Up @@ -36,7 +36,7 @@ class TextExtractionPredictionInstance(proto.Message):
If a key is provided, the batch prediction
result will be mapped to this key. If omitted,
then the batch prediction result will contain
the entire input instance. AI Platform will not
the entire input instance. Vertex AI will not
check if keys in the request are duplicates, so
it is up to the caller to ensure the keys are
unique.
Expand Down
Expand Up @@ -37,16 +37,16 @@ class VideoClassificationPredictionParams(proto.Message):
10,000.
segment_classification (bool):
Set to true to request segment-level
classification. AI Platform returns labels and
classification. Vertex AI returns labels and
their confidence scores for the entire time
segment of the video that user specified in the
input instance. Default value is true
shot_classification (bool):
Set to true to request shot-level
classification. AI Platform determines the
classification. Vertex AI determines the
boundaries for each camera shot in the entire
time segment of the video that user specified in
the input instance. AI Platform then returns
the input instance. Vertex AI then returns
labels and their confidence scores for each
detected shot, along with the start and end time
of the shot.
Expand All @@ -57,15 +57,14 @@ class VideoClassificationPredictionParams(proto.Message):
Default value is false
one_sec_interval_classification (bool):
Set to true to request classification for a
video at one-second intervals. AI Platform
returns labels and their confidence scores for
each second of the entire time segment of the
video that user specified in the input WARNING:
Model evaluation is not done for this
classification type, the quality of it depends
on the training data, but there are no metrics
provided to describe that quality. Default value
is false
video at one-second intervals. Vertex AI returns
labels and their confidence scores for each
second of the entire time segment of the video
that user specified in the input. WARNING: Model
evaluation is not done for this classification
type, the quality of it depends on the training
data, but there are no metrics provided to
describe that quality. Default value is false
"""

confidence_threshold = proto.Field(proto.FLOAT, number=1,)
Expand Down
Expand Up @@ -27,8 +27,7 @@ class ClassificationPredictionResult(proto.Message):
Attributes:
ids (Sequence[int]):
The resource IDs of the AnnotationSpecs that
had been identified, ordered by the confidence
score descendingly.
had been identified.
display_names (Sequence[str]):
The display names of the AnnotationSpecs that
had been identified, order matches the IDs.
Expand Down
Expand Up @@ -139,6 +139,9 @@ class AutoMlTablesInputs(proto.Message):
predictions to a BigQuery table. If this
configuration is absent, then the export is not
performed.
additional_experiments (Sequence[str]):
Additional experiment flags for the Tables
training pipeline.
"""

class Transformation(proto.Message):
Expand Down Expand Up @@ -401,6 +404,7 @@ class TextArrayTransformation(proto.Message):
number=10,
message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig,
)
additional_experiments = proto.RepeatedField(proto.STRING, number=11,)


class AutoMlTablesMetadata(proto.Message):
Expand Down
Expand Up @@ -167,6 +167,9 @@ class AutoMlForecastingInputs(proto.Message):
- "ignore-validation" - ignore the results of the
validation and continue
additional_experiments (Sequence[str]):
Additional experiment flags for the time
series forecasting training.
"""

class Transformation(proto.Message):
Expand Down Expand Up @@ -376,6 +379,7 @@ class Granularity(proto.Message):
)
quantiles = proto.RepeatedField(proto.DOUBLE, number=16,)
validation_options = proto.Field(proto.STRING, number=17,)
additional_experiments = proto.RepeatedField(proto.STRING, number=25,)


class AutoMlForecastingMetadata(proto.Message):
Expand Down
Expand Up @@ -48,6 +48,8 @@ class ModelType(proto.Enum):
MODEL_TYPE_UNSPECIFIED = 0
CLOUD = 1
MOBILE_VERSATILE_1 = 2
MOBILE_JETSON_VERSATILE_1 = 3
MOBILE_CORAL_VERSATILE_1 = 4

model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)

Expand Down
Expand Up @@ -33,7 +33,6 @@ class ExportEvaluatedDataItemsConfig(proto.Message):
If not specified, then results are exported to the following
auto-created BigQuery table:
<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples
override_existing_table (bool):
If true and an export destination is
Expand Down
22 changes: 11 additions & 11 deletions google/cloud/aiplatform_v1/services/migration_service/client.py
Expand Up @@ -195,32 +195,32 @@ def parse_dataset_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}

@staticmethod
def dataset_path(project: str, dataset: str,) -> str:
def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/datasets/{dataset}".format(
project=project, dataset=dataset,
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)

@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
path,
)
return m.groupdict() if m else {}

@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
def dataset_path(project: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
return "projects/{project}/datasets/{dataset}".format(
project=project, dataset=dataset,
)

@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
path,
)
m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
return m.groupdict() if m else {}

@staticmethod
Expand Down
Expand Up @@ -179,19 +179,16 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}

@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
def dataset_path(project: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
return "projects/{project}/datasets/{dataset}".format(
project=project, dataset=dataset,
)

@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
path,
)
m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
return m.groupdict() if m else {}

@staticmethod
Expand All @@ -211,16 +208,19 @@ def parse_dataset_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}

@staticmethod
def dataset_path(project: str, dataset: str,) -> str:
def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/datasets/{dataset}".format(
project=project, dataset=dataset,
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)

@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
path,
)
return m.groupdict() if m else {}

@staticmethod
Expand Down
Expand Up @@ -28,8 +28,8 @@ class ImageClassificationPredictionInstance(proto.Message):
r"""Prediction input format for Image Classification.
Attributes:
content (str):
The image bytes or GCS URI to make the
prediction on.
The image bytes or Cloud Storage URI to make
the prediction on.
mime_type (str):
The MIME type of the content of the image.
Only the images in below listed MIME types are
Expand Down
Expand Up @@ -28,8 +28,8 @@ class ImageObjectDetectionPredictionInstance(proto.Message):
r"""Prediction input format for Image Object Detection.
Attributes:
content (str):
The image bytes or GCS URI to make the
prediction on.
The image bytes or Cloud Storage URI to make
the prediction on.
mime_type (str):
The MIME type of the content of the image.
Only the images in below listed MIME types are
Expand Down
Expand Up @@ -38,7 +38,7 @@ class TextExtractionPredictionInstance(proto.Message):
If a key is provided, the batch prediction
result will be mapped to this key. If omitted,
then the batch prediction result will contain
the entire input instance. AI Platform will not
the entire input instance. Vertex AI will not
check if keys in the request are duplicates, so
it is up to the caller to ensure the keys are
unique.
Expand Down
Expand Up @@ -39,16 +39,16 @@ class VideoClassificationPredictionParams(proto.Message):
10,000.
segment_classification (bool):
Set to true to request segment-level
classification. AI Platform returns labels and
classification. Vertex AI returns labels and
their confidence scores for the entire time
segment of the video that user specified in the
input instance. Default value is true
shot_classification (bool):
Set to true to request shot-level
classification. AI Platform determines the
classification. Vertex AI determines the
boundaries for each camera shot in the entire
time segment of the video that user specified in
the input instance. AI Platform then returns
the input instance. Vertex AI then returns
labels and their confidence scores for each
detected shot, along with the start and end time
of the shot.
Expand All @@ -59,15 +59,14 @@ class VideoClassificationPredictionParams(proto.Message):
Default value is false
one_sec_interval_classification (bool):
Set to true to request classification for a
video at one-second intervals. AI Platform
returns labels and their confidence scores for
each second of the entire time segment of the
video that user specified in the input WARNING:
Model evaluation is not done for this
classification type, the quality of it depends
on the training data, but there are no metrics
provided to describe that quality. Default value
is false
video at one-second intervals. Vertex AI returns
labels and their confidence scores for each
second of the entire time segment of the video
that user specified in the input. WARNING: Model
evaluation is not done for this classification
type, the quality of it depends on the training
data, but there are no metrics provided to
describe that quality. Default value is false
"""

confidence_threshold = proto.Field(
Expand Down
Expand Up @@ -29,8 +29,7 @@ class ClassificationPredictionResult(proto.Message):
Attributes:
ids (Sequence[int]):
The resource IDs of the AnnotationSpecs that
had been identified, ordered by the confidence
score descendingly.
had been identified.
display_names (Sequence[str]):
The display names of the AnnotationSpecs that
had been identified, order matches the IDs.
Expand Down
28 changes: 14 additions & 14 deletions tests/unit/gapic/aiplatform_v1/test_migration_service.py
Expand Up @@ -1700,18 +1700,20 @@ def test_parse_dataset_path():

def test_dataset_path():
project = "squid"
dataset = "clam"
expected = "projects/{project}/datasets/{dataset}".format(
project=project, dataset=dataset,
location = "clam"
dataset = "whelk"
expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
actual = MigrationServiceClient.dataset_path(project, dataset)
actual = MigrationServiceClient.dataset_path(project, location, dataset)
assert expected == actual


def test_parse_dataset_path():
expected = {
"project": "whelk",
"dataset": "octopus",
"project": "octopus",
"location": "oyster",
"dataset": "nudibranch",
}
path = MigrationServiceClient.dataset_path(**expected)

Expand All @@ -1721,20 +1723,18 @@ def test_parse_dataset_path():


def test_dataset_path():
project = "oyster"
location = "nudibranch"
dataset = "cuttlefish"
expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
project = "cuttlefish"
dataset = "mussel"
expected = "projects/{project}/datasets/{dataset}".format(
project=project, dataset=dataset,
)
actual = MigrationServiceClient.dataset_path(project, location, dataset)
actual = MigrationServiceClient.dataset_path(project, dataset)
assert expected == actual


def test_parse_dataset_path():
expected = {
"project": "mussel",
"location": "winkle",
"project": "winkle",
"dataset": "nautilus",
}
path = MigrationServiceClient.dataset_path(**expected)
Expand Down

0 comments on commit 8077b3d

Please sign in to comment.