Skip to content
This repository has been archived by the owner on Nov 29, 2023. It is now read-only.

Commit

Permalink
samples: add streaming_automl_object_tracking sample (#143)
Browse files Browse the repository at this point in the history
  • Loading branch information
morgandu committed May 21, 2021
1 parent 50da518 commit a8f37db
Show file tree
Hide file tree
Showing 2 changed files with 126 additions and 0 deletions.
116 changes: 116 additions & 0 deletions samples/analyze/beta_snippets.py
Expand Up @@ -39,6 +39,9 @@
python beta_snippets.py streaming-automl-classification resources/cat.mp4 \
$PROJECT_ID $MODEL_ID
python beta_snippets.py streaming-automl-object-tracking resources/cat.mp4 \
$PROJECT_ID $MODEL_ID
"""

import argparse
Expand Down Expand Up @@ -762,6 +765,110 @@ def stream_generator():
# [END video_streaming_automl_classification_beta]


def streaming_automl_object_tracking(path, project_id, model_id):
    """Annotate a local video with a deployed AutoML object tracking model.

    Streams the file to the Video Intelligence streaming API in chunks and
    prints each tracked object's time offset, entity, track id, confidence,
    and bounding box.

    Args:
        path: Path to a local video file (must use a supported codec).
        project_id: Google Cloud project that hosts the AutoML model.
        model_id: ID of the trained AutoML object tracking model.
    """
    # [START video_streaming_automl_object_tracking_beta]
    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'
    # project_id = 'project_id'
    # model_id = 'automl_object_tracking_model_id'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # Fully-qualified resource name of the AutoML model (streaming AutoML
    # models are served from the us-central1 location).
    model_path = "projects/{}/locations/us-central1/models/{}".format(
        project_id, model_id
    )

    automl_config = videointelligence.StreamingAutomlObjectTrackingConfig(
        model_name=model_path
    )

    video_config = videointelligence.StreamingVideoConfig(
        feature=videointelligence.StreamingFeature.STREAMING_AUTOML_OBJECT_TRACKING,
        automl_object_tracking_config=automl_config,
    )

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.StreamingAnnotateVideoRequest(
        video_config=video_config
    )

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    # Note: Input videos must have supported video codecs. See
    # https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
    # for more details.
    stream = []
    # The builtin open() replaces the redundant io.open alias (identical in
    # Python 3), so no `import io` is needed.
    with open(path, "rb") as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)

    def stream_generator():
        # The config request must precede any content requests.
        yield config_request
        for chunk in stream:
            yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The default timeout is about 300 seconds.
    # To process longer videos it should be set to
    # larger than the length (in seconds) of the stream.
    responses = client.streaming_annotate_video(requests, timeout=900)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        object_annotations = response.annotation_results.object_annotations

        # object_annotations could be empty
        if not object_annotations:
            continue

        for annotation in object_annotations:
            # Each annotation has one frame, which has a timeoffset.
            frame = annotation.frames[0]
            time_offset = (
                frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
            )

            description = annotation.entity.description
            confidence = annotation.confidence

            # track_id tracks the same object in the video.
            track_id = annotation.track_id

            # description is in Unicode
            print("{}s".format(time_offset))
            print(u"\tEntity description: {}".format(description))
            print("\tTrack Id: {}".format(track_id))
            if annotation.entity.entity_id:
                print("\tEntity id: {}".format(annotation.entity.entity_id))

            print("\tConfidence: {}".format(confidence))

            # Every annotation has only one frame
            frame = annotation.frames[0]
            box = frame.normalized_bounding_box
            print("\tBounding box position:")
            print("\tleft : {}".format(box.left))
            print("\ttop  : {}".format(box.top))
            print("\tright: {}".format(box.right))
            print("\tbottom: {}\n".format(box.bottom))
    # [END video_streaming_automl_object_tracking_beta]


if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
Expand Down Expand Up @@ -826,6 +933,13 @@ def stream_generator():
video_streaming_automl_classification_parser.add_argument("project_id")
video_streaming_automl_classification_parser.add_argument("model_id")

video_streaming_automl_object_tracking_parser = subparsers.add_parser(
"streaming-automl-object-tracking", help=streaming_automl_object_tracking.__doc__
)
video_streaming_automl_object_tracking_parser.add_argument("path")
video_streaming_automl_object_tracking_parser.add_argument("project_id")
video_streaming_automl_object_tracking_parser.add_argument("model_id")

args = parser.parse_args()

if args.command == "transcription":
Expand All @@ -850,3 +964,5 @@ def stream_generator():
annotation_to_storage_streaming(args.path, args.output_uri)
elif args.command == "streaming-automl-classification":
streaming_automl_classification(args.path, args.project_id, args.model_id)
elif args.command == "streaming-automl-object-tracking":
streaming_automl_object_tracking(args.path, args.project_id, args.model_id)
10 changes: 10 additions & 0 deletions samples/analyze/beta_snippets_test.py
Expand Up @@ -176,3 +176,13 @@ def test_streaming_automl_classification(capsys, video_path):
beta_snippets.streaming_automl_classification(video_path, project_id, model_id)
out, _ = capsys.readouterr()
assert "brush_hair" in out


# Flaky Gateway
@pytest.mark.flaky(max_runs=3, min_passes=1)
def test_streaming_automl_object_tracking(capsys, video_path):
    """Smoke-test the object tracking sample against a deployed AutoML model."""
    model_id = "VOT282620667826798592"
    project_id = os.environ["GOOGLE_CLOUD_PROJECT"]

    beta_snippets.streaming_automl_object_tracking(video_path, project_id, model_id)

    captured = capsys.readouterr()
    assert "Track Id" in captured.out

0 comments on commit a8f37db

Please sign in to comment.