From 4c60ad67dcd9026cb989d6e81dec4813cbae962f Mon Sep 17 00:00:00 2001 From: Morgan Du Date: Wed, 25 Nov 2020 10:32:08 -0800 Subject: [PATCH] feat: add video action recognition samples (#77) Co-authored-by: Yu-Han Liu --- ...ion_job_video_action_recognition_sample.py | 60 +++++++++++ ...ction_job_video_action_recognition_test.py | 82 ++++++++++++++ ...ipeline_video_action_recognition_sample.py | 54 ++++++++++ ..._pipeline_video_action_recognition_test.py | 100 ++++++++++++++++++ ...t_model_video_action_recognition_sample.py | 44 ++++++++ ...ort_model_video_action_recognition_test.py | 45 ++++++++ ...luation_video_action_recognition_sample.py | 37 +++++++ ...valuation_video_action_recognition_test.py | 30 ++++++ ...rt_data_video_action_recognition_sample.py | 44 ++++++++ ...port_data_video_action_recognition_test.py | 84 +++++++++++++++ 10 files changed, 580 insertions(+) create mode 100644 samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py create mode 100644 samples/snippets/create_batch_prediction_job_video_action_recognition_test.py create mode 100644 samples/snippets/create_training_pipeline_video_action_recognition_sample.py create mode 100644 samples/snippets/create_training_pipeline_video_action_recognition_test.py create mode 100644 samples/snippets/export_model_video_action_recognition_sample.py create mode 100644 samples/snippets/export_model_video_action_recognition_test.py create mode 100644 samples/snippets/get_model_evaluation_video_action_recognition_sample.py create mode 100644 samples/snippets/get_model_evaluation_video_action_recognition_test.py create mode 100644 samples/snippets/import_data_video_action_recognition_sample.py create mode 100644 samples/snippets/import_data_video_action_recognition_test.py diff --git a/samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py b/samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py new file mode 100644 index 
0000000000..e5775d3e36 --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py @@ -0,0 +1,60 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_create_batch_prediction_job_video_action_recognition_sample] +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def create_batch_prediction_job_video_action_recognition_sample( + project: str, + display_name: str, + model: str, + gcs_source_uri: str, + gcs_destination_output_uri_prefix: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.JobServiceClient(client_options=client_options) + model_parameters_dict = { + "confidenceThreshold": 0.5, + } + model_parameters = json_format.ParseDict(model_parameters_dict, Value()) + + batch_prediction_job = { + "display_name": display_name, + # Format: 'projects/{project}/locations/{location}/models/{model_id}' + "model": model, + "model_parameters": model_parameters, + "input_config": { + "instances_format": "jsonl", + "gcs_source": {"uris": [gcs_source_uri]}, + }, + "output_config": { + "predictions_format": "jsonl", + "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix}, + }, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_batch_prediction_job( + parent=parent, batch_prediction_job=batch_prediction_job + ) + print("response:", response) + + +# [END aiplatform_create_batch_prediction_job_video_action_recognition_sample] diff --git a/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py b/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py new file mode 100644 index 0000000000..7a3e07a082 --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py @@ -0,0 +1,82 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid +import pytest +import os + +import helpers + +import create_batch_prediction_job_video_action_recognition_sample + +from google.cloud import aiplatform + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +LOCATION = "us-central1" +MODEL_ID = "3530998029718913024" # permanent_swim_run_videos_action_recognition_model +DISPLAY_NAME = f"temp_create_batch_prediction_job_video_action_recognition_test_{uuid.uuid4()}" +GCS_SOURCE_URI = "gs://automl-video-demo-data/ucaip-var/swimrun_bp.jsonl" +GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" +API_ENDPOINT = "us-central1-aiplatform.googleapis.com" + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def job_client(): + client_options = {"api_endpoint": API_ENDPOINT} + job_client = aiplatform.gapic.JobServiceClient( + client_options=client_options) + yield job_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state, job_client): + yield + job_client.delete_batch_prediction_job( + name=shared_state["batch_prediction_job_name"] + ) + + +# Creating AutoML Video Action Recognition batch prediction job +def test_create_batch_prediction_job_video_action_recognition_sample( + capsys, shared_state, job_client +): + + model = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}" + + create_batch_prediction_job_video_action_recognition_sample.create_batch_prediction_job_video_action_recognition_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + model=model, + gcs_source_uri=GCS_SOURCE_URI, + gcs_destination_output_uri_prefix=GCS_OUTPUT_URI, + ) + + out, _ = capsys.readouterr() + + # Save resource name of the newly created batch prediction job + shared_state["batch_prediction_job_name"] = helpers.get_name(out) + + # Waiting for batch prediction job to be in SUCCEEDED state + helpers.wait_for_job_state( + get_job_method=job_client.get_batch_prediction_job, + name=shared_state["batch_prediction_job_name"], 
expected_state="SUCCEEDED", + timeout=600, + freq=20, + ) diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_sample.py b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py new file mode 100644 index 0000000000..aff9f5059b --- /dev/null +++ b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py @@ -0,0 +1,54 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_create_training_pipeline_video_action_recognition_sample] +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def create_training_pipeline_video_action_recognition_sample( + project: str, + display_name: str, + dataset_id: str, + model_display_name: str, + model_type: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) + training_task_inputs_dict = { + # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1' + "modelType": model_type + } + training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + + training_pipeline = { + "display_name": display_name, + "training_task_definition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_action_recognition_1.0.0.yaml", + "training_task_inputs": training_task_inputs, + "input_data_config": {"dataset_id": dataset_id}, + "model_to_upload": {"display_name": model_display_name}, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_training_pipeline( + parent=parent, training_pipeline=training_pipeline + ) + print("response:", response) + + +# [END aiplatform_create_training_pipeline_video_action_recognition_sample] diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_test.py b/samples/snippets/create_training_pipeline_video_action_recognition_test.py new file mode 100644 index 0000000000..b443746d67 --- /dev/null +++ b/samples/snippets/create_training_pipeline_video_action_recognition_test.py @@ -0,0 +1,100 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid +import pytest +import os + +import helpers + +import create_training_pipeline_video_action_recognition_sample + +from google.cloud import aiplatform + +LOCATION = "us-central1" +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +DATASET_ID = "6881957627459272704" # permanent_swim_run_videos_action_recognition_dataset +DISPLAY_NAME = f"temp_create_training_pipeline_video_action_recognition_test_{uuid.uuid4()}" +MODEL_DISPLAY_NAME = f"Temp Model for {DISPLAY_NAME}" +MODEL_TYPE = "CLOUD" +API_ENDPOINT = "us-central1-aiplatform.googleapis.com" + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def pipeline_client(): + client_options = {"api_endpoint": API_ENDPOINT} + pipeline_client = aiplatform.gapic.PipelineServiceClient( + client_options=client_options + ) + yield pipeline_client + + +@pytest.fixture +def model_client(): + client_options = {"api_endpoint": API_ENDPOINT} + model_client = aiplatform.gapic.ModelServiceClient( + client_options=client_options) + yield model_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state, model_client, pipeline_client): + yield + model_client.delete_model(name=shared_state["model_name"]) + pipeline_client.delete_training_pipeline( + name=shared_state["training_pipeline_name"] + ) + + +# Training AutoML Video Action Recognition Model +def test_create_training_pipeline_video_action_recognition_sample( + capsys, shared_state, pipeline_client +): + create_training_pipeline_video_action_recognition_sample.create_training_pipeline_video_action_recognition_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + dataset_id=DATASET_ID, + model_display_name=MODEL_DISPLAY_NAME, + model_type=MODEL_TYPE, + ) + + out, _ = capsys.readouterr() + + assert "response:" in out + + # Save resource name of the newly created training pipeline + shared_state["training_pipeline_name"] = helpers.get_name(out) + + # Poll until the pipeline succeeds because we want to test the 
model_upload step as well. + helpers.wait_for_job_state( + get_job_method=pipeline_client.get_training_pipeline, + name=shared_state["training_pipeline_name"], + expected_state="SUCCEEDED", + timeout=5000, + freq=20, + ) + + training_pipeline = pipeline_client.get_training_pipeline( + name=shared_state["training_pipeline_name"] + ) + + # Check that the model indeed has been uploaded. + assert training_pipeline.model_to_upload.name != "" + + shared_state["model_name"] = training_pipeline.model_to_upload.name diff --git a/samples/snippets/export_model_video_action_recognition_sample.py b/samples/snippets/export_model_video_action_recognition_sample.py new file mode 100644 index 0000000000..570f82fba5 --- /dev/null +++ b/samples/snippets/export_model_video_action_recognition_sample.py @@ -0,0 +1,44 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_export_model_video_action_recognition_sample] +from google.cloud import aiplatform + + +def export_model_video_action_recognition_sample( + project: str, + model_id: str, + gcs_destination_output_uri_prefix: str, + export_format: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", + timeout: int = 300, +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.ModelServiceClient(client_options=client_options) + gcs_destination = {"output_uri_prefix": gcs_destination_output_uri_prefix} + output_config = { + "artifact_destination": gcs_destination, + "export_format_id": export_format, + } + name = client.model_path(project=project, location=location, model=model_id) + response = client.export_model(name=name, output_config=output_config) + print("Long running operation:", response.operation.name) + export_model_response = response.result(timeout=timeout) + print("export_model_response:", export_model_response) + + +# [END aiplatform_export_model_video_action_recognition_sample] diff --git a/samples/snippets/export_model_video_action_recognition_test.py b/samples/snippets/export_model_video_action_recognition_test.py new file mode 100644 index 0000000000..543be7dc47 --- /dev/null +++ b/samples/snippets/export_model_video_action_recognition_test.py @@ -0,0 +1,45 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +import os + +import export_model_video_action_recognition_sample +from google.cloud import storage + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +MODEL_ID = "3422489426196955136" # permanent_swim_run_videos_action_recognition_edge_model +GCS_URI = "gs://ucaip-samples-test-output/tmp/export_model_video_action_recognition_sample" +EXPORT_FORMAT = "tf-saved-model" + +@pytest.fixture(scope="function", autouse=True) +def teardown(): + yield + + storage_client = storage.Client() + bucket = storage_client.get_bucket("ucaip-samples-test-output") + blobs = bucket.list_blobs(prefix="tmp/export_model_video_action_recognition_sample") + for blob in blobs: + blob.delete() + + +def test_export_model_video_action_recognition_sample(capsys): + export_model_video_action_recognition_sample.export_model_video_action_recognition_sample( + project=PROJECT_ID, + model_id=MODEL_ID, + gcs_destination_output_uri_prefix=GCS_URI, + export_format=EXPORT_FORMAT, + ) + out, _ = capsys.readouterr() + assert "export_model_response" in out diff --git a/samples/snippets/get_model_evaluation_video_action_recognition_sample.py b/samples/snippets/get_model_evaluation_video_action_recognition_sample.py new file mode 100644 index 0000000000..10fde4d286 --- /dev/null +++ b/samples/snippets/get_model_evaluation_video_action_recognition_sample.py @@ -0,0 +1,37 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# [START aiplatform_get_model_evaluation_video_action_recognition_sample] +from google.cloud import aiplatform + + +def get_model_evaluation_video_action_recognition_sample( + project: str, + model_id: str, + evaluation_id: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.ModelServiceClient(client_options=client_options) + name = client.model_evaluation_path( + project=project, location=location, model=model_id, evaluation=evaluation_id + ) + response = client.get_model_evaluation(name=name) + print("response:", response) + + +# [END aiplatform_get_model_evaluation_video_action_recognition_sample] diff --git a/samples/snippets/get_model_evaluation_video_action_recognition_test.py b/samples/snippets/get_model_evaluation_video_action_recognition_test.py new file mode 100644 index 0000000000..973987e086 --- /dev/null +++ b/samples/snippets/get_model_evaluation_video_action_recognition_test.py @@ -0,0 +1,30 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +import os + +import get_model_evaluation_video_action_recognition_sample + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +MODEL_ID = "3530998029718913024" # permanent_swim_run_videos_action_recognition_model +EVALUATION_ID = "305008923591573504" # Permanent swim/run videos action recognition evaluation + + +def test_ucaip_generated_get_model_evaluation_sample(capsys): + get_model_evaluation_video_action_recognition_sample.get_model_evaluation_video_action_recognition_sample( + project=PROJECT_ID, model_id=MODEL_ID, evaluation_id=EVALUATION_ID + ) + out, _ = capsys.readouterr() + assert "metrics_schema_uri" in out diff --git a/samples/snippets/import_data_video_action_recognition_sample.py b/samples/snippets/import_data_video_action_recognition_sample.py new file mode 100644 index 0000000000..ccc4ec1de9 --- /dev/null +++ b/samples/snippets/import_data_video_action_recognition_sample.py @@ -0,0 +1,44 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_import_data_video_action_recognition_sample] +from google.cloud import aiplatform + + +def import_data_video_action_recognition_sample( + project: str, + dataset_id: str, + gcs_source_uri: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", + timeout: int = 1800, +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. 
+ # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.DatasetServiceClient(client_options=client_options) + import_configs = [ + { + "gcs_source": {"uris": [gcs_source_uri]}, + "import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/video_action_recognition_io_format_1.0.0.yaml", + } + ] + name = client.dataset_path(project=project, location=location, dataset=dataset_id) + response = client.import_data(name=name, import_configs=import_configs) + print("Long running operation:", response.operation.name) + import_data_response = response.result(timeout=timeout) + print("import_data_response:", import_data_response) + + +# [END aiplatform_import_data_video_action_recognition_sample] diff --git a/samples/snippets/import_data_video_action_recognition_test.py b/samples/snippets/import_data_video_action_recognition_test.py new file mode 100644 index 0000000000..e62dc1f49f --- /dev/null +++ b/samples/snippets/import_data_video_action_recognition_test.py @@ -0,0 +1,84 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import pytest +import os + +import uuid +from google.cloud import aiplatform + +import import_data_video_action_recognition_sample + + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +LOCATION = "us-central1" +GCS_SOURCE = "gs://automl-video-demo-data/ucaip-var/swimrun.jsonl" +METADATA_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml" + +API_ENDPOINT = "us-central1-aiplatform.googleapis.com" +DISPLAY_NAME = f"temp_import_data_video_action_recognition_test_{uuid.uuid4()}" + + +@pytest.fixture +def shared_state(): + shared_state = {} + yield shared_state + + +@pytest.fixture +def dataset_client(): + client_options = {"api_endpoint": API_ENDPOINT} + dataset_client = aiplatform.gapic.DatasetServiceClient( + client_options=client_options + ) + yield dataset_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state, dataset_client): + + yield + dataset_name = dataset_client.dataset_path( + project=PROJECT_ID, location=LOCATION, dataset=shared_state["dataset_id"] + ) + response = dataset_client.delete_dataset(name=dataset_name) + delete_dataset_response = response.result(timeout=120) + + +def test_import_data_video_action_recognition_sample( + capsys, shared_state, dataset_client +): + + dataset = aiplatform.gapic.Dataset( + display_name=DISPLAY_NAME, metadata_schema_uri=METADATA_SCHEMA_URI, + ) + + response = dataset_client.create_dataset( + parent=f"projects/{PROJECT_ID}/locations/{LOCATION}", dataset=dataset + ) + + create_dataset_response = response.result(timeout=120) + + shared_state["dataset_name"] = create_dataset_response.name + shared_state["dataset_id"] = create_dataset_response.name.split("/")[-1] + + import_data_video_action_recognition_sample.import_data_video_action_recognition_sample( + project=PROJECT_ID, + dataset_id=shared_state["dataset_id"], + gcs_source_uri=GCS_SOURCE, + ) + out, _ = capsys.readouterr() + + assert "import_data_response" in out