
feat: update create_training_pipeline samples (#142)
* feat: update create_training_pipeline samples.  Use the schema types.
dizcology committed Dec 22, 2020
1 parent 1cbd4a5 commit 624a08d
Showing 9 changed files with 59 additions and 74 deletions.
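
All nine samples change in the same way: instead of hand-building a camelCase dict and parsing it into a google.protobuf.struct_pb2.Value with json_format.ParseDict, each sample now constructs the typed schema class from google.cloud.aiplatform.schema.trainingjob and converts it with to_value(). A minimal sketch of the two patterns, using field names and values taken from the diffs below:

    from google.cloud.aiplatform.schema import trainingjob
    from google.protobuf import json_format
    from google.protobuf.struct_pb2 import Value

    # Old pattern: untyped dict with camelCase keys, parsed into a protobuf Value.
    inputs_dict = {"modelType": "CLOUD_HIGH_ACCURACY_1", "budgetMilliNodeHours": 20000}
    training_task_inputs = json_format.ParseDict(inputs_dict, Value())

    # New pattern: typed schema class with snake_case fields, then to_value().
    inputs = trainingjob.definition.AutoMlImageObjectDetectionInputs(
        model_type="CLOUD_HIGH_ACCURACY_1",
        budget_milli_node_hours=20000,
    )
    training_task_inputs = inputs.to_value()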
53 changes: 19 additions & 34 deletions .sample_configs/process_configs.yaml
@@ -69,48 +69,33 @@ create_hyperparameter_tuning_job_sample: {}
 create_specialist_pool_sample: {}
 create_training_pipeline_custom_job_sample: {}
 create_training_pipeline_custom_training_managed_dataset_sample: {}
-create_training_pipeline_entity_extraction_sample: {}
-create_training_pipeline_image_classification_sample: {}
-create_training_pipeline_image_object_detection_sample: {}
+create_training_pipeline_image_classification_sample:
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlImageClassificationInputs
+create_training_pipeline_image_object_detection_sample:
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlImageObjectDetectionInputs
 create_training_pipeline_sample: {}
 create_training_pipeline_tabular_classification_sample: {}
 create_training_pipeline_tabular_regression_sample: {}
-create_training_pipeline_text_classification_sample: {}
+create_training_pipeline_text_classification_sample:
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlTextClassificationInputs
 create_training_pipeline_text_entity_extraction_sample:
-  skip:
-  - predict_schemata
-  - supported_export_formats
-  - container_spec
-  - deployed_models
-  - explanation_spec
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlTextExtractionInputs
 create_training_pipeline_text_sentiment_analysis_sample:
-  skip:
-  - predict_schemata
-  - supported_export_formats
-  - container_spec
-  - deployed_models
-  - explanation_spec
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlTextSentimentInputs
 create_training_pipeline_video_action_recognition_sample:
-  skip:
-  - predict_schemata
-  - supported_export_formats
-  - container_spec
-  - deployed_models
-  - explanation_spec
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlVideoActionRecognitionInputs
 create_training_pipeline_video_classification_sample:
-  skip:
-  - predict_schemata
-  - supported_export_formats
-  - container_spec
-  - deployed_models
-  - explanation_spec
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlVideoClassificationInputs
 create_training_pipeline_video_object_tracking_sample:
-  skip:
-  - predict_schemata
-  - supported_export_formats
-  - container_spec
-  - deployed_models
-  - explanation_spec
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlVideoObjectTrackingInputs
 delete_batch_prediction_job_sample: {}
 delete_custom_job_sample: {}
 delete_data_labeling_job_sample: {}
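
The schema_types entries added above are presumably what drives the sample generator: for each listed sample, the training_task_inputs_dict parameter is rendered with the named schema class instead of a raw dict. A sketch of the rendered result for the image classification entry (the generator template itself is not part of this commit):

    # What the config entry expands to in the generated sample (sketch):
    training_task_inputs_object = trainingjob.definition.AutoMlImageClassificationInputs()
    training_task_inputs = training_task_inputs_object.to_value()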
@@ -30,14 +30,13 @@ def create_training_pipeline_image_classification_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-
-    icn_training_inputs = trainingjob.definition.AutoMlImageClassificationInputs(
+    training_task_inputs_object = trainingjob.definition.AutoMlImageClassificationInputs(
         multi_label=True,
-        model_type=trainingjob.definition.AutoMlImageClassificationInputs.ModelType.CLOUD,
+        model_type="CLOUD",
         budget_milli_node_hours=8000,
-        disable_early_stopping=False
+        disable_early_stopping=False,
     )
-    training_task_inputs = icn_training_inputs.to_value()
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
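
Note that to_value() serializes the proto-plus message into the same google.protobuf.struct_pb2.Value that json_format.ParseDict produced before, so the request wire format should be unchanged; the snake_case constructor fields correspond to the camelCase keys the old dicts spelled out by hand. A sketch of the equivalence, with field values from the hunk above (the commented dict is illustrative, not from the commit):

    from google.cloud.aiplatform.schema import trainingjob

    inputs = trainingjob.definition.AutoMlImageClassificationInputs(
        multi_label=True,
        model_type="CLOUD",
        budget_milli_node_hours=8000,
        disable_early_stopping=False,
    )
    # Serializes to roughly the dict the old code parsed with ParseDict:
    # {"multiLabel": True, "modelType": "CLOUD",
    #  "budgetMilliNodeHours": 8000, "disableEarlyStopping": False}
    training_task_inputs = inputs.to_value()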
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_image_object_detection_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_image_object_detection_sample(
@@ -31,12 +30,12 @@ def create_training_pipeline_image_object_detection_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {
-        "modelType": "CLOUD_HIGH_ACCURACY_1",
-        "budgetMilliNodeHours": 20000,
-        "disableEarlyStopping": False,
-    }
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = trainingjob.definition.AutoMlImageObjectDetectionInputs(
+        model_type="CLOUD_HIGH_ACCURACY_1",
+        budget_milli_node_hours=20000,
+        disable_early_stopping=False,
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_text_classification_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_text_classification_sample(
@@ -31,8 +30,10 @@ def create_training_pipeline_text_classification_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {}
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = (
+        trainingjob.definition.AutoMlTextClassificationInputs()
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_text_entity_extraction_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_text_entity_extraction_sample(
@@ -31,8 +30,8 @@ def create_training_pipeline_text_entity_extraction_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {}
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = trainingjob.definition.AutoMlTextExtractionInputs()
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_text_sentiment_analysis_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_text_sentiment_analysis_sample(
@@ -32,8 +31,10 @@ def create_training_pipeline_text_sentiment_analysis_sample(
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
     # Use sentiment_max of 4
-    training_task_inputs_dict = {"sentiment_max": 4}
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = trainingjob.definition.AutoMlTextSentimentInputs(
+        sentiment_max=4
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
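
Background on the field, not from this diff: sentiment_max sets the top of the integer sentiment scale, so the sample's sentiment_max=4 trains a model that scores documents from 0 (most negative) to 4 (most positive). A hypothetical 3-point scale would look like:

    # Hypothetical: a negative / neutral / positive scale instead of the
    # sample's 5-point scale (values are illustrative, not from the commit).
    training_task_inputs_object = trainingjob.definition.AutoMlTextSentimentInputs(
        sentiment_max=2
    )
    training_task_inputs = training_task_inputs_object.to_value()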
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_video_action_recognition_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_video_action_recognition_sample(
@@ -32,11 +31,11 @@ def create_training_pipeline_video_action_recognition_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {
-        # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1'
-        "modelType": model_type
-    }
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1'
+    training_task_inputs_object = trainingjob.definition.AutoMlVideoActionRecognitionInputs(
+        model_type=model_type
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_video_classification_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_video_classification_sample(
@@ -31,8 +30,10 @@ def create_training_pipeline_video_classification_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {}
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = (
+        trainingjob.definition.AutoMlVideoClassificationInputs()
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_video_object_tracking_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_video_object_tracking_sample(
@@ -31,8 +30,10 @@ def create_training_pipeline_video_object_tracking_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {"modelType": "CLOUD"}
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = trainingjob.definition.AutoMlVideoObjectTrackingInputs(
+        model_type="CLOUD"
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
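
In every sample, the code below the shown hunks (truncated in this view) is unchanged: the converted Value goes into the training_pipeline dict and is submitted through the GAPIC client. A sketch of that tail, wrapped in a hypothetical helper; submit_training_pipeline and the training_task_definition schema URI are placeholders, not taken from this diff:

    def submit_training_pipeline(
        client, project, location, display_name,
        training_task_definition, training_task_inputs,
    ):
        # training_task_inputs is the Value produced by to_value() above.
        training_pipeline = {
            "display_name": display_name,
            "training_task_definition": training_task_definition,  # task schema URI
            "training_task_inputs": training_task_inputs,
        }
        parent = f"projects/{project}/locations/{location}"
        return client.create_training_pipeline(
            parent=parent, training_pipeline=training_pipeline
        )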
