diff --git a/.sample_configs/process_configs.yaml b/.sample_configs/process_configs.yaml
index 7001e974ac..432abcd68d 100644
--- a/.sample_configs/process_configs.yaml
+++ b/.sample_configs/process_configs.yaml
@@ -69,48 +69,33 @@ create_hyperparameter_tuning_job_sample: {}
 create_specialist_pool_sample: {}
 create_training_pipeline_custom_job_sample: {}
 create_training_pipeline_custom_training_managed_dataset_sample: {}
-create_training_pipeline_entity_extraction_sample: {}
-create_training_pipeline_image_classification_sample: {}
-create_training_pipeline_image_object_detection_sample: {}
+create_training_pipeline_image_classification_sample:
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlImageClassificationInputs
+create_training_pipeline_image_object_detection_sample:
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlImageObjectDetectionInputs
 create_training_pipeline_sample: {}
 create_training_pipeline_tabular_classification_sample: {}
 create_training_pipeline_tabular_regression_sample: {}
-create_training_pipeline_text_classification_sample: {}
+create_training_pipeline_text_classification_sample:
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlTextClassificationInputs
 create_training_pipeline_text_entity_extraction_sample:
-  skip:
-  - predict_schemata
-  - supported_export_formats
-  - container_spec
-  - deployed_models
-  - explanation_spec
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlTextExtractionInputs
 create_training_pipeline_text_sentiment_analysis_sample:
-  skip:
-  - predict_schemata
-  - supported_export_formats
-  - container_spec
-  - deployed_models
-  - explanation_spec
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlTextSentimentInputs
 create_training_pipeline_video_action_recognition_sample:
-  skip:
-  - predict_schemata
-  - supported_export_formats
-  - container_spec
-  - deployed_models
-  - explanation_spec
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlVideoActionRecognitionInputs
 create_training_pipeline_video_classification_sample:
-  skip:
-  - predict_schemata
-  - supported_export_formats
-  - container_spec
-  - deployed_models
-  - explanation_spec
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlVideoClassificationInputs
 create_training_pipeline_video_object_tracking_sample:
-  skip:
-  - predict_schemata
-  - supported_export_formats
-  - container_spec
-  - deployed_models
-  - explanation_spec
+  schema_types:
+    training_task_inputs_dict: trainingjob.definition.AutoMlVideoObjectTrackingInputs
 delete_batch_prediction_job_sample: {}
 delete_custom_job_sample: {}
 delete_data_labeling_job_sample: {}
diff --git a/samples/snippets/create_training_pipeline_image_classification_sample.py b/samples/snippets/create_training_pipeline_image_classification_sample.py
index 9186e498d9..dddd3eb5cf 100644
--- a/samples/snippets/create_training_pipeline_image_classification_sample.py
+++ b/samples/snippets/create_training_pipeline_image_classification_sample.py
@@ -30,14 +30,13 @@ def create_training_pipeline_image_classification_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-
-    icn_training_inputs = trainingjob.definition.AutoMlImageClassificationInputs(
+    training_task_inputs_object = trainingjob.definition.AutoMlImageClassificationInputs(
         multi_label=True,
-        model_type=trainingjob.definition.AutoMlImageClassificationInputs.ModelType.CLOUD,
+        model_type="CLOUD",
         budget_milli_node_hours=8000,
-        disable_early_stopping=False
+        disable_early_stopping=False,
     )
-    training_task_inputs = icn_training_inputs.to_value()
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
diff --git a/samples/snippets/create_training_pipeline_image_object_detection_sample.py b/samples/snippets/create_training_pipeline_image_object_detection_sample.py
index 9337e82c0d..bb6d243ca2 100644
--- a/samples/snippets/create_training_pipeline_image_object_detection_sample.py
+++ b/samples/snippets/create_training_pipeline_image_object_detection_sample.py
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_image_object_detection_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_image_object_detection_sample(
@@ -31,12 +30,12 @@ def create_training_pipeline_image_object_detection_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {
-        "modelType": "CLOUD_HIGH_ACCURACY_1",
-        "budgetMilliNodeHours": 20000,
-        "disableEarlyStopping": False,
-    }
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = trainingjob.definition.AutoMlImageObjectDetectionInputs(
+        model_type="CLOUD_HIGH_ACCURACY_1",
+        budget_milli_node_hours=20000,
+        disable_early_stopping=False,
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
diff --git a/samples/snippets/create_training_pipeline_text_classification_sample.py b/samples/snippets/create_training_pipeline_text_classification_sample.py
index f18579d659..95de59dfba 100644
--- a/samples/snippets/create_training_pipeline_text_classification_sample.py
+++ b/samples/snippets/create_training_pipeline_text_classification_sample.py
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_text_classification_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_text_classification_sample(
@@ -31,8 +30,10 @@ def create_training_pipeline_text_classification_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {}
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = (
+        trainingjob.definition.AutoMlTextClassificationInputs()
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
diff --git a/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py b/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py
index 10ee43dc64..131a2aba77 100644
--- a/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py
+++ b/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_text_entity_extraction_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_text_entity_extraction_sample(
@@ -31,8 +30,8 @@ def create_training_pipeline_text_entity_extraction_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {}
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = trainingjob.definition.AutoMlTextExtractionInputs()
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
diff --git a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py
index 4ac221fe5d..b12cd95fd7 100644
--- a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py
+++ b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_text_sentiment_analysis_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_text_sentiment_analysis_sample(
@@ -32,8 +31,10 @@ def create_training_pipeline_text_sentiment_analysis_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
     # Use sentiment_max of 4
-    training_task_inputs_dict = {"sentiment_max": 4}
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = trainingjob.definition.AutoMlTextSentimentInputs(
+        sentiment_max=4
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_sample.py b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py
index facc8c1afc..5de1ecba9c 100644
--- a/samples/snippets/create_training_pipeline_video_action_recognition_sample.py
+++ b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_video_action_recognition_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_video_action_recognition_sample(
@@ -32,11 +31,11 @@ def create_training_pipeline_video_action_recognition_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {
-        # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1'
-        "modelType": model_type
-    }
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1'
+    training_task_inputs_object = trainingjob.definition.AutoMlVideoActionRecognitionInputs(
+        model_type=model_type
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
diff --git a/samples/snippets/create_training_pipeline_video_classification_sample.py b/samples/snippets/create_training_pipeline_video_classification_sample.py
index 6ff8c03ac6..f404ed62d9 100644
--- a/samples/snippets/create_training_pipeline_video_classification_sample.py
+++ b/samples/snippets/create_training_pipeline_video_classification_sample.py
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_video_classification_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_video_classification_sample(
@@ -31,8 +30,10 @@ def create_training_pipeline_video_classification_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {}
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = (
+        trainingjob.definition.AutoMlVideoClassificationInputs()
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
diff --git a/samples/snippets/create_training_pipeline_video_object_tracking_sample.py b/samples/snippets/create_training_pipeline_video_object_tracking_sample.py
index 52cfb4714a..562627f97c 100644
--- a/samples/snippets/create_training_pipeline_video_object_tracking_sample.py
+++ b/samples/snippets/create_training_pipeline_video_object_tracking_sample.py
@@ -14,8 +14,7 @@
 
 # [START aiplatform_create_training_pipeline_video_object_tracking_sample]
 from google.cloud import aiplatform
-from google.protobuf import json_format
-from google.protobuf.struct_pb2 import Value
+from google.cloud.aiplatform.schema import trainingjob
 
 
 def create_training_pipeline_video_object_tracking_sample(
@@ -31,8 +30,10 @@ def create_training_pipeline_video_object_tracking_sample(
     # Initialize client that will be used to create and send requests.
     # This client only needs to be created once, and can be reused for multiple requests.
     client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
-    training_task_inputs_dict = {"modelType": "CLOUD"}
-    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
+    training_task_inputs_object = trainingjob.definition.AutoMlVideoObjectTrackingInputs(
+        model_type="CLOUD"
+    )
+    training_task_inputs = training_task_inputs_object.to_value()
 
     training_pipeline = {
         "display_name": display_name,
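Note for reviewers: every updated sample follows the same pattern, so here is a minimal sketch of it in one place. It is illustrative only: the endpoint and display name are placeholder values, and the remaining `training_pipeline` fields (training task definition, dataset config, model to upload, and the `create_training_pipeline` call itself) are omitted because this PR does not change them.

```python
from google.cloud import aiplatform
from google.cloud.aiplatform.schema import trainingjob

# Placeholder endpoint; the samples take api_endpoint as a parameter.
client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"}
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)

# Build a typed inputs object instead of a hand-written camelCase dict ...
training_task_inputs_object = trainingjob.definition.AutoMlImageClassificationInputs(
    multi_label=True,
    model_type="CLOUD",
    budget_milli_node_hours=8000,
    disable_early_stopping=False,
)
# ... and convert it to the protobuf Value that the TrainingPipeline expects.
training_task_inputs = training_task_inputs_object.to_value()

training_pipeline = {
    "display_name": "my_training_pipeline",  # placeholder
    "training_task_inputs": training_task_inputs,
    # training_task_definition, input_data_config, and model_to_upload
    # are unchanged by this PR and omitted from this sketch.
}
```

The practical difference from the old `json_format.ParseDict` approach is that field names and types are checked when the inputs object is constructed, rather than surfacing only when the request is parsed server-side.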