feat: add video action recognition samples (#77)
Co-authored-by: Yu-Han Liu <yuhanliu@google.com>
morgandu and dizcology committed Nov 25, 2020
1 parent 5155dee commit 4c60ad6
Showing 10 changed files with 580 additions and 0 deletions.
60 changes: 60 additions & 0 deletions samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py
@@ -0,0 +1,60 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START aiplatform_create_batch_prediction_job_video_action_recognition_sample]
from google.cloud import aiplatform
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value


def create_batch_prediction_job_video_action_recognition_sample(
    project: str,
    display_name: str,
    model: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.JobServiceClient(client_options=client_options)
    model_parameters_dict = {
        "confidenceThreshold": 0.5,
    }
    model_parameters = json_format.ParseDict(model_parameters_dict, Value())

    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model,
        "model_parameters": model_parameters,
        "input_config": {
            "instances_format": "jsonl",
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": "jsonl",
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }
    parent = f"projects/{project}/locations/{location}"
    response = client.create_batch_prediction_job(
        parent=parent, batch_prediction_job=batch_prediction_job
    )
    print("response:", response)


# [END aiplatform_create_batch_prediction_job_video_action_recognition_sample]
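
A minimal usage sketch for the sample above; the project ID, model resource name, and Cloud Storage URIs below are illustrative placeholders, not values from this commit.

create_batch_prediction_job_video_action_recognition_sample(
    project="your-project-id",  # placeholder project ID
    display_name="example-var-batch-prediction-job",
    model="projects/your-project-id/locations/us-central1/models/1234567890",  # placeholder model resource name
    gcs_source_uri="gs://your-bucket/input/videos.jsonl",  # JSONL file listing the videos to predict on
    gcs_destination_output_uri_prefix="gs://your-bucket/output/",  # predictions are written under this prefix
)
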
82 changes: 82 additions & 0 deletions samples/snippets/create_batch_prediction_job_video_action_recognition_test.py
@@ -0,0 +1,82 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid
import pytest
import os

import helpers

import create_batch_prediction_job_video_action_recognition_sample

from google.cloud import aiplatform

PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
MODEL_ID = "3530998029718913024" # permanent_swim_run_videos_action_recognition_model
DISPLAY_NAME = f"temp_create_batch_prediction_job_video_action_recognition_test_{uuid.uuid4()}"
GCS_SOURCE_URI = "gs://automl-video-demo-data/ucaip-var/swimrun_bp.jsonl"
GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/"
API_ENDPOINT = "us-central1-aiplatform.googleapis.com"

@pytest.fixture
def shared_state():
    state = {}
    yield state


@pytest.fixture
def job_client():
    client_options = {"api_endpoint": API_ENDPOINT}
    job_client = aiplatform.gapic.JobServiceClient(client_options=client_options)
    yield job_client


@pytest.fixture(scope="function", autouse=True)
def teardown(shared_state, job_client):
    yield
    job_client.delete_batch_prediction_job(
        name=shared_state["batch_prediction_job_name"]
    )


# Creating an AutoML Video Action Recognition batch prediction job
def test_create_batch_prediction_job_video_action_recognition_sample(
    capsys, shared_state, job_client
):
    model = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}"

    create_batch_prediction_job_video_action_recognition_sample.create_batch_prediction_job_video_action_recognition_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model=model,
        gcs_source_uri=GCS_SOURCE_URI,
        gcs_destination_output_uri_prefix=GCS_OUTPUT_URI,
    )

    out, _ = capsys.readouterr()

    # Save resource name of the newly created batch prediction job
    shared_state["batch_prediction_job_name"] = helpers.get_name(out)

    # Waiting for the batch prediction job to reach the SUCCEEDED state
    helpers.wait_for_job_state(
        get_job_method=job_client.get_batch_prediction_job,
        name=shared_state["batch_prediction_job_name"],
        expected_state="SUCCEEDED",
        timeout=600,
        freq=20,
    )
54 changes: 54 additions & 0 deletions samples/snippets/create_training_pipeline_video_action_recognition_sample.py
@@ -0,0 +1,54 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START aiplatform_create_training_pipeline_video_action_recognition_sample]
from google.cloud import aiplatform
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value


def create_training_pipeline_video_action_recognition_sample(
    project: str,
    display_name: str,
    dataset_id: str,
    model_display_name: str,
    model_type: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
    training_task_inputs_dict = {
        # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1'
        "modelType": model_type
    }
    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())

    training_pipeline = {
        "display_name": display_name,
        "training_task_definition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_action_recognition_1.0.0.yaml",
        "training_task_inputs": training_task_inputs,
        "input_data_config": {"dataset_id": dataset_id},
        "model_to_upload": {"display_name": model_display_name},
    }
    parent = f"projects/{project}/locations/{location}"
    response = client.create_training_pipeline(
        parent=parent, training_pipeline=training_pipeline
    )
    print("response:", response)


# [END aiplatform_create_training_pipeline_video_action_recognition_sample]
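
A minimal usage sketch for the training pipeline sample, with placeholder argument values; model_type "CLOUD" matches the value exercised by the test below.

create_training_pipeline_video_action_recognition_sample(
    project="your-project-id",  # placeholder project ID
    display_name="example-var-training-pipeline",
    dataset_id="1234567890",  # placeholder ID of an existing video action recognition dataset
    model_display_name="example-var-model",
    model_type="CLOUD",
)
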
100 changes: 100 additions & 0 deletions samples/snippets/create_training_pipeline_video_action_recognition_test.py
@@ -0,0 +1,100 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid
import pytest
import os

import helpers

import create_training_pipeline_video_action_recognition_sample

from google.cloud import aiplatform

LOCATION = "us-central1"
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
DATASET_ID = "6881957627459272704" # permanent_swim_run_videos_action_recognition_dataset
DISPLAY_NAME = f"temp_create_training_pipeline_video_action_recognition_test_{uuid.uuid4()}"
MODEL_DISPLAY_NAME = f"Temp Model for {DISPLAY_NAME}"
MODEL_TYPE = "CLOUD"
API_ENDPOINT = "us-central1-aiplatform.googleapis.com"

@pytest.fixture
def shared_state():
    state = {}
    yield state


@pytest.fixture
def pipeline_client():
    client_options = {"api_endpoint": API_ENDPOINT}
    pipeline_client = aiplatform.gapic.PipelineServiceClient(
        client_options=client_options
    )
    yield pipeline_client


@pytest.fixture
def model_client():
    client_options = {"api_endpoint": API_ENDPOINT}
    model_client = aiplatform.gapic.ModelServiceClient(client_options=client_options)
    yield model_client


@pytest.fixture(scope="function", autouse=True)
def teardown(shared_state, model_client, pipeline_client):
    yield
    model_client.delete_model(name=shared_state["model_name"])
    pipeline_client.delete_training_pipeline(
        name=shared_state["training_pipeline_name"]
    )


# Training an AutoML Video Action Recognition model
def test_create_training_pipeline_video_action_recognition_sample(
    capsys, shared_state, pipeline_client
):
    create_training_pipeline_video_action_recognition_sample.create_training_pipeline_video_action_recognition_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        dataset_id=DATASET_ID,
        model_display_name=MODEL_DISPLAY_NAME,
        model_type=MODEL_TYPE,
    )

    out, _ = capsys.readouterr()

    assert "response:" in out

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)

    # Poll until the pipeline succeeds because we want to test the model_upload step as well.
    helpers.wait_for_job_state(
        get_job_method=pipeline_client.get_training_pipeline,
        name=shared_state["training_pipeline_name"],
        expected_state="SUCCEEDED",
        timeout=5000,
        freq=20,
    )

    training_pipeline = pipeline_client.get_training_pipeline(
        name=shared_state["training_pipeline_name"]
    )

    # Check that the model indeed has been uploaded.
    assert training_pipeline.model_to_upload.name != ""

    shared_state["model_name"] = training_pipeline.model_to_upload.name
44 changes: 44 additions & 0 deletions samples/snippets/export_model_video_action_recognition_sample.py
@@ -0,0 +1,44 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START aiplatform_export_model_video_action_recognition_sample]
from google.cloud import aiplatform


def export_model_video_action_recognition_sample(
    project: str,
    model_id: str,
    gcs_destination_output_uri_prefix: str,
    export_format: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
    timeout: int = 300,
):
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.ModelServiceClient(client_options=client_options)
    gcs_destination = {"output_uri_prefix": gcs_destination_output_uri_prefix}
    output_config = {
        "artifact_destination": gcs_destination,
        "export_format_id": export_format,
    }
    name = client.model_path(project=project, location=location, model=model_id)
    response = client.export_model(name=name, output_config=output_config)
    print("Long running operation:", response.operation.name)
    export_model_response = response.result(timeout=timeout)
    print("export_model_response:", export_model_response)


# [END aiplatform_export_model_video_action_recognition_sample]
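
A minimal usage sketch for the export sample, with placeholder argument values; the "tf-saved-model" export format matches the value used in the test below.

export_model_video_action_recognition_sample(
    project="your-project-id",  # placeholder project ID
    model_id="1234567890",  # placeholder ID of an exportable (edge) model
    gcs_destination_output_uri_prefix="gs://your-bucket/export/",  # exported artifacts land under this prefix
    export_format="tf-saved-model",
)
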
45 changes: 45 additions & 0 deletions samples/snippets/export_model_video_action_recognition_test.py
@@ -0,0 +1,45 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest
import os

import export_model_video_action_recognition_sample
from google.cloud import storage

PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
MODEL_ID = "3422489426196955136" # permanent_swim_run_videos_action_recognition_edge_model
GCS_URI = "gs://ucaip-samples-test-output/tmp/export_model_video_action_recognition_sample"
EXPORT_FORMAT = "tf-saved-model"

@pytest.fixture(scope="function", autouse=True)
def teardown():
    yield

    storage_client = storage.Client()
    bucket = storage_client.get_bucket("ucaip-samples-test-output")
    blobs = bucket.list_blobs(prefix="tmp/export_model_video_action_recognition_sample")
    for blob in blobs:
        blob.delete()


def test_export_model_video_action_recognition_sample(capsys):
    export_model_video_action_recognition_sample.export_model_video_action_recognition_sample(
        project=PROJECT_ID,
        model_id=MODEL_ID,
        gcs_destination_output_uri_prefix=GCS_URI,
        export_format=EXPORT_FORMAT,
    )
    out, _ = capsys.readouterr()
    assert "export_model_response" in out
