diff --git a/google/cloud/videointelligence_v1/gapic/enums.py b/google/cloud/videointelligence_v1/gapic/enums.py
index c8757091..3115c183 100644
--- a/google/cloud/videointelligence_v1/gapic/enums.py
+++ b/google/cloud/videointelligence_v1/gapic/enums.py
@@ -28,11 +28,12 @@ class Feature(enum.IntEnum):
LABEL_DETECTION (int): Label detection. Detect objects, such as dog or flower.
SHOT_CHANGE_DETECTION (int): Shot change detection.
EXPLICIT_CONTENT_DETECTION (int): Explicit content detection.
- FACE_DETECTION (int): Human face detection and tracking.
+ FACE_DETECTION (int): Human face detection.
SPEECH_TRANSCRIPTION (int): Speech transcription.
TEXT_DETECTION (int): OCR text detection and tracking.
OBJECT_TRACKING (int): Object detection and tracking.
LOGO_RECOGNITION (int): Logo detection, tracking, and recognition.
+ PERSON_DETECTION (int): Person detection.
"""
FEATURE_UNSPECIFIED = 0
@@ -44,6 +45,7 @@ class Feature(enum.IntEnum):
TEXT_DETECTION = 7
OBJECT_TRACKING = 9
LOGO_RECOGNITION = 12
+ PERSON_DETECTION = 14
class LabelDetectionMode(enum.IntEnum):
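
The new enum member is purely additive, so existing callers are unaffected. A minimal sketch of how the value surfaces on the Python side (the assertions are illustrative, not part of this change):

```python
from google.cloud.videointelligence_v1 import enums

# PERSON_DETECTION maps to proto number 14; the gaps in the numbering
# (5, 8, 10, 11, 13) are values not defined by this version of the API.
assert enums.Feature.PERSON_DETECTION == 14
assert enums.Feature.PERSON_DETECTION.name == "PERSON_DETECTION"
```
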
diff --git a/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py
index 9e10eaa9..d62f41f8 100644
--- a/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py
+++ b/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py
@@ -47,7 +47,7 @@
class VideoIntelligenceServiceClient(object):
- """Service that implements Google Cloud Video Intelligence API."""
+ """Service that implements the Video Intelligence API."""
SERVICE_ADDRESS = "videointelligence.googleapis.com:443"
"""The default address of the service."""
@@ -233,33 +233,33 @@ def annotate_video(
Args:
features (list[~google.cloud.videointelligence_v1.types.Feature]): Required. Requested video annotation features.
- input_uri (str): Input video location. Currently, only `Google Cloud
- Storage <https://cloud.google.com/storage/>`__ URIs are supported, which
+ input_uri (str): Input video location. Currently, only `Cloud
+ Storage <https://cloud.google.com/storage/>`__ URIs are supported. URIs
must be specified in the following format: ``gs://bucket-id/object-id``
(other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For
more information, see `Request
- URIs <https://cloud.google.com/storage/docs/request-endpoints>`__. A
- video URI may include wildcards in ``object-id``, and thus identify
- multiple videos. Supported wildcards: '*' to match 0 or more characters;
+ URIs <https://cloud.google.com/storage/docs/request-endpoints>`__. To
+ identify multiple videos, a video URI may include wildcards in the
+ ``object-id``. Supported wildcards: '*' to match 0 or more characters;
'?' to match 1 character. If unset, the input video should be embedded
- in the request as ``input_content``. If set, ``input_content`` should be
+ in the request as ``input_content``. If set, ``input_content`` must be
unset.
input_content (bytes): The video data bytes. If unset, the input video(s) should be
- specified via ``input_uri``. If set, ``input_uri`` should be unset.
+ specified via the ``input_uri``. If set, ``input_uri`` must be unset.
video_context (Union[dict, ~google.cloud.videointelligence_v1.types.VideoContext]): Additional video context and/or feature-specific parameters.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.videointelligence_v1.types.VideoContext`
output_uri (str): Optional. Location where the output (in JSON format) should be
- stored. Currently, only `Google Cloud
- Storage <https://cloud.google.com/storage/>`__ URIs are supported, which
+ stored. Currently, only `Cloud
+ Storage <https://cloud.google.com/storage/>`__ URIs are supported. These
must be specified in the following format: ``gs://bucket-id/object-id``
(other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For
more information, see `Request
URIs <https://cloud.google.com/storage/docs/request-endpoints>`__.
location_id (str): Optional. Cloud region where annotation should take place. Supported
- cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``,
- ``asia-east1``. If no region is specified, a region will be determined
+ cloud regions are: ``us-east1``, ``us-west1``, ``europe-west1``,
+ ``asia-east1``. If no region is specified, the region will be determined
based on video file location.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
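
Taken together, the docstring rules make ``input_uri`` and ``input_content`` mutually exclusive. A minimal sketch of a conforming call (bucket and object names are placeholders):

```python
from google.cloud import videointelligence_v1 as videointelligence

client = videointelligence.VideoIntelligenceServiceClient()

# Exactly one of input_uri / input_content may be set. Wildcards in the
# object-id ('*' for 0 or more characters, '?' for one) fan a single
# request out over multiple videos.
operation = client.annotate_video(
    features=[videointelligence.enums.Feature.LABEL_DETECTION],
    input_uri="gs://example-bucket/videos/*.mp4",  # placeholder URI
    location_id="us-east1",  # optional; omit to let the service choose
)
print(operation.result(timeout=600))
```
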
diff --git a/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py b/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py
index 74dc2121..bcc8d4f3 100644
--- a/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py
+++ b/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py
@@ -2,25 +2,34 @@
"interfaces": {
"google.cloud.videointelligence.v1.VideoIntelligenceService": {
"retry_codes": {
- "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
- "non_idempotent": [],
+ "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
+ "no_retry_codes": [],
},
"retry_params": {
- "default": {
+ "retry_policy_1_params": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
- "initial_rpc_timeout_millis": 120000,
+ "initial_rpc_timeout_millis": 600000,
"rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 120000,
+ "max_rpc_timeout_millis": 600000,
"total_timeout_millis": 600000,
- }
+ },
+ "no_retry_params": {
+ "initial_retry_delay_millis": 0,
+ "retry_delay_multiplier": 0.0,
+ "max_retry_delay_millis": 0,
+ "initial_rpc_timeout_millis": 0,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 0,
+ "total_timeout_millis": 0,
+ },
},
"methods": {
"AnnotateVideo": {
- "timeout_millis": 60000,
- "retry_codes_name": "idempotent",
- "retry_params_name": "default",
+ "timeout_millis": 600000,
+ "retry_codes_name": "retry_policy_1_codes",
+ "retry_params_name": "retry_policy_1_params",
}
},
}
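
The renamed policy keeps the same backoff shape (1 s initial delay, 2.5x multiplier, 120 s cap) but raises the RPC timeouts from 120 s to 600 s to match the operation's total deadline. A caller who needs different behavior can mirror the policy client-side with ``google.api_core.retry.Retry`` — a sketch, assuming the defaults above are what you want to reproduce:

```python
from google.api_core import exceptions, retry

# Client-side equivalent of retry_policy_1_params: retry UNAVAILABLE and
# DEADLINE_EXCEEDED with a 1 s initial delay, 2.5x multiplier, 120 s max
# delay, and a 600 s overall deadline.
annotate_retry = retry.Retry(
    predicate=retry.if_exception_type(
        exceptions.ServiceUnavailable,
        exceptions.DeadlineExceeded,
    ),
    initial=1.0,
    multiplier=2.5,
    maximum=120.0,
    deadline=600.0,
)
# Passed per call: client.annotate_video(..., retry=annotate_retry)
```
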
diff --git a/google/cloud/videointelligence_v1/proto/video_intelligence.proto b/google/cloud/videointelligence_v1/proto/video_intelligence.proto
index 9dcfda55..5b3a3e34 100644
--- a/google/cloud/videointelligence_v1/proto/video_intelligence.proto
+++ b/google/cloud/videointelligence_v1/proto/video_intelligence.proto
@@ -32,16 +32,18 @@ option java_package = "com.google.cloud.videointelligence.v1";
option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1";
option ruby_package = "Google::Cloud::VideoIntelligence::V1";
-// Service that implements Google Cloud Video Intelligence API.
+// Service that implements the Video Intelligence API.
service VideoIntelligenceService {
option (google.api.default_host) = "videointelligence.googleapis.com";
- option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
+ option (google.api.oauth_scopes) =
+ "https://www.googleapis.com/auth/cloud-platform";
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
- rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) {
+ rpc AnnotateVideo(AnnotateVideoRequest)
+ returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/videos:annotate"
body: "*"
@@ -57,20 +59,21 @@ service VideoIntelligenceService {
// Video annotation request.
message AnnotateVideoRequest {
// Input video location. Currently, only
- // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
- // supported, which must be specified in the following format:
+ // [Cloud Storage](https://cloud.google.com/storage/) URIs are
+ // supported. URIs must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
- // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
- // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
- // A video URI may include wildcards in `object-id`, and thus identify
- // multiple videos. Supported wildcards: '*' to match 0 or more characters;
+ // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+ // more information, see [Request
+ // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+ // multiple videos, a video URI may include wildcards in the `object-id`.
+ // Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
- // in the request as `input_content`. If set, `input_content` should be unset.
+ // in the request as `input_content`. If set, `input_content` must be unset.
string input_uri = 1;
// The video data bytes.
- // If unset, the input video(s) should be specified via `input_uri`.
- // If set, `input_uri` should be unset.
+ // If unset, the input video(s) should be specified via the `input_uri`.
+ // If set, `input_uri` must be unset.
bytes input_content = 6;
// Required. Requested video annotation features.
@@ -80,16 +83,18 @@ message AnnotateVideoRequest {
VideoContext video_context = 3;
// Optional. Location where the output (in JSON format) should be stored.
- // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
- // URIs are supported, which must be specified in the following format:
+ // Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+ // URIs are supported. These must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
- // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
- // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+ // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+ // more information, see [Request
+ // URIs](https://cloud.google.com/storage/docs/request-endpoints).
string output_uri = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. Cloud region where annotation should take place. Supported cloud
- // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
- // is specified, a region will be determined based on video file location.
+ // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+ // region is specified, the region will be determined based on video file
+ // location.
string location_id = 5 [(google.api.field_behavior) = OPTIONAL];
}
@@ -118,6 +123,9 @@ message VideoContext {
// Config for TEXT_DETECTION.
TextDetectionConfig text_detection_config = 8;
+ // Config for PERSON_DETECTION.
+ PersonDetectionConfig person_detection_config = 11;
+
// Config for OBJECT_TRACKING.
ObjectTrackingConfig object_tracking_config = 13;
}
@@ -136,7 +144,7 @@ enum Feature {
// Explicit content detection.
EXPLICIT_CONTENT_DETECTION = 3;
- // Human face detection and tracking.
+ // Human face detection.
FACE_DETECTION = 4;
// Speech transcription.
@@ -150,6 +158,9 @@ enum Feature {
// Logo detection, tracking, and recognition.
LOGO_RECOGNITION = 12;
+
+ // Person detection.
+ PERSON_DETECTION = 14;
}
// Label detection mode.
@@ -195,9 +206,9 @@ message LabelDetectionConfig {
// If unspecified, defaults to `SHOT_MODE`.
LabelDetectionMode label_detection_mode = 1;
- // Whether the video has been shot from a stationary (i.e. non-moving) camera.
- // When set to true, might improve detection accuracy for moving objects.
- // Should be used with `SHOT_AND_FRAME_MODE` enabled.
+ // Whether the video has been shot from a stationary (i.e., non-moving)
+ // camera. When set to true, might improve detection accuracy for moving
+ // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
bool stationary_camera = 2;
// Model to use for label detection.
@@ -209,15 +220,15 @@ message LabelDetectionConfig {
// frame-level detection. If not set, it is set to 0.4 by default. The valid
// range for this threshold is [0.1, 0.9]. Any value set outside of this
// range will be clipped.
- // Note: for best results please follow the default threshold. We will update
+ // Note: For best results, follow the default threshold. We will update
// the default threshold every time we release a new model.
float frame_confidence_threshold = 4;
// The confidence threshold we perform filtering on the labels from
- // video-level and shot-level detections. If not set, it is set to 0.3 by
+ // video-level and shot-level detections. If not set, it's set to 0.3 by
// default. The valid range for this threshold is [0.1, 0.9]. Any value set
// outside of this range will be clipped.
- // Note: for best results please follow the default threshold. We will update
+ // Note: For best results, follow the default threshold. We will update
// the default threshold every time we release a new model.
float video_confidence_threshold = 5;
}
@@ -245,8 +256,29 @@ message FaceDetectionConfig {
// "builtin/latest".
string model = 1;
- // Whether bounding boxes be included in the face annotation output.
+ // Whether bounding boxes are included in the face annotation output.
bool include_bounding_boxes = 2;
+
+ // Whether to enable face attributes detection, such as glasses, dark_glasses,
+ // mouth_open, etc. Ignored if 'include_bounding_boxes' is set to false.
+ bool include_attributes = 5;
+}
+
+// Config for PERSON_DETECTION.
+message PersonDetectionConfig {
+ // Whether bounding boxes are included in the person detection annotation
+ // output.
+ bool include_bounding_boxes = 1;
+
+ // Whether to enable pose landmarks detection. Ignored if
+ // 'include_bounding_boxes' is set to false.
+ bool include_pose_landmarks = 2;
+
+ // Whether to enable person attributes detection, such as cloth color (black,
+ // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+ // etc.
+ // Ignored if 'include_bounding_boxes' is set to false.
+ bool include_attributes = 3;
}
// Config for EXPLICIT_CONTENT_DETECTION.
@@ -309,7 +341,7 @@ message Entity {
// API](https://developers.google.com/knowledge-graph/).
string entity_id = 1;
- // Textual description, e.g. `Fixed-gear bicycle`.
+ // Textual description, e.g., `Fixed-gear bicycle`.
string description = 2;
// Language code for `description` in BCP-47 format.
@@ -322,9 +354,9 @@ message LabelAnnotation {
Entity entity = 1;
// Common categories for the detected entity.
- // E.g. when the label is `Terrier` the category is likely `dog`. And in some
- // cases there might be more than one categories e.g. `Terrier` could also be
- // a `pet`.
+ // For example, when the label is `Terrier`, the category is likely `dog`. And
+ // in some cases there might be more than one category, e.g., `Terrier` could
+ // also be a `pet`.
repeated Entity category_entities = 2;
// All video segments where a label was detected.
@@ -332,6 +364,9 @@ message LabelAnnotation {
// All video frames where a label was detected.
repeated LabelFrame frames = 4;
+
+ // Feature version.
+ string version = 5;
}
// Video frame level annotation results for explicit content.
@@ -350,6 +385,9 @@ message ExplicitContentFrame {
message ExplicitContentAnnotation {
// All video frames where explicit content was detected.
repeated ExplicitContentFrame frames = 1;
+
+ // Feature version.
+ string version = 2;
}
// Normalized bounding box.
@@ -369,14 +407,31 @@ message NormalizedBoundingBox {
float bottom = 4;
}
+// Face detection annotation.
+message FaceDetectionAnnotation {
+ // Feature version.
+ string version = 5;
+}
+
+// Person detection annotation per video.
+message PersonDetectionAnnotation {
+ // The detected tracks of a person.
+ repeated Track tracks = 1;
+
+ // Feature version.
+ string version = 2;
+}
+
// Video segment level annotation results for face detection.
message FaceSegment {
// Video segment where a face was detected.
VideoSegment segment = 1;
}
-// Video frame level annotation results for face detection.
+// Deprecated. No effect.
message FaceFrame {
+ option deprecated = true;
+
// Normalized bounding boxes in a frame.
// There can be more than one box if the same face is detected in multiple
// locations within the current frame.
@@ -387,8 +442,10 @@ message FaceFrame {
google.protobuf.Duration time_offset = 2;
}
-// Face annotation.
+// Deprecated. No effect.
message FaceAnnotation {
+ option deprecated = true;
+
// Thumbnail of a representative face view (in JPEG format).
bytes thumbnail = 1;
@@ -411,10 +468,12 @@ message TimestampedObject {
google.protobuf.Duration time_offset = 2;
// Optional. The attributes of the object in the bounding box.
- repeated DetectedAttribute attributes = 3 [(google.api.field_behavior) = OPTIONAL];
+ repeated DetectedAttribute attributes = 3
+ [(google.api.field_behavior) = OPTIONAL];
// Optional. The detected landmarks.
- repeated DetectedLandmark landmarks = 4 [(google.api.field_behavior) = OPTIONAL];
+ repeated DetectedLandmark landmarks = 4
+ [(google.api.field_behavior) = OPTIONAL];
}
// A track of an object instance.
@@ -426,7 +485,8 @@ message Track {
repeated TimestampedObject timestamped_objects = 2;
// Optional. Attributes in the track level.
- repeated DetectedAttribute attributes = 3 [(google.api.field_behavior) = OPTIONAL];
+ repeated DetectedAttribute attributes = 3
+ [(google.api.field_behavior) = OPTIONAL];
// Optional. The confidence score of the tracked object.
float confidence = 4 [(google.api.field_behavior) = OPTIONAL];
@@ -434,7 +494,7 @@ message Track {
// A generic detected attribute represented by name in string format.
message DetectedAttribute {
- // The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+ // The name of the attribute, for example, glasses, dark_glasses, mouth_open.
// A full list of supported type names will be provided in the document.
string name = 1;
@@ -449,7 +509,7 @@ message DetectedAttribute {
// A generic detected landmark represented by name in string format and a 2D
// location.
message DetectedLandmark {
- // The name of this landmark, i.e. left_hand, right_shoulder.
+ // The name of this landmark, for example, left_hand, right_shoulder.
string name = 1;
// The 2D point of the detected landmark using the normalized image
@@ -463,17 +523,17 @@ message DetectedLandmark {
// Annotation results for a single video.
message VideoAnnotationResults {
// Video file location in
- // [Google Cloud Storage](https://cloud.google.com/storage/).
+ // [Cloud Storage](https://cloud.google.com/storage/).
string input_uri = 1;
// Video segment on which the annotation is run.
VideoSegment segment = 10;
- // Topical label annotations on video level or user specified segment level.
+ // Topical label annotations on video level or user-specified segment level.
// There is exactly one element for each unique label.
repeated LabelAnnotation segment_label_annotations = 2;
- // Presence label annotations on video level or user specified segment level.
+ // Presence label annotations on video level or user-specified segment level.
// There is exactly one element for each unique label. Compared to the
// existing topical `segment_label_annotations`, this field presents more
// fine-grained, segment-level labels detected in video content and is made
@@ -496,8 +556,11 @@ message VideoAnnotationResults {
// There is exactly one element for each unique label.
repeated LabelAnnotation frame_label_annotations = 4;
- // Face annotations. There is exactly one element for each unique face.
- repeated FaceAnnotation face_annotations = 5;
+ // Deprecated. Please use `face_detection_annotations` instead.
+ repeated FaceAnnotation face_annotations = 5 [deprecated = true];
+
+ // Face detection annotations.
+ repeated FaceDetectionAnnotation face_detection_annotations = 13;
// Shot annotations. Each shot is represented as a video segment.
repeated VideoSegment shot_annotations = 6;
@@ -519,6 +582,9 @@ message VideoAnnotationResults {
// Annotations for list of logos detected, tracked and recognized in video.
repeated LogoRecognitionAnnotation logo_recognition_annotations = 19;
+ // Person detection annotations.
+ repeated PersonDetectionAnnotation person_detection_annotations = 20;
+
// If set, indicates an error. Note that for a single `AnnotateVideoRequest`
// some videos may succeed and some may fail.
google.rpc.Status error = 9;
@@ -535,7 +601,7 @@ message AnnotateVideoResponse {
// Annotation progress for a single video.
message VideoAnnotationProgress {
// Video file location in
- // [Google Cloud Storage](https://cloud.google.com/storage/).
+ // [Cloud Storage](https://cloud.google.com/storage/).
string input_uri = 1;
// Approximate percentage processed thus far. Guaranteed to be
@@ -549,11 +615,11 @@ message VideoAnnotationProgress {
google.protobuf.Timestamp update_time = 4;
// Specifies which feature is being tracked if the request contains more than
- // one features.
+ // one feature.
Feature feature = 5;
// Specifies which segment is being tracked if the request contains more than
- // one segments.
+ // one segment.
VideoSegment segment = 6;
}
@@ -588,7 +654,8 @@ message SpeechTranscriptionConfig {
bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL];
// Optional. A means to provide context to assist the speech recognition.
- repeated SpeechContext speech_contexts = 4 [(google.api.field_behavior) = OPTIONAL];
+ repeated SpeechContext speech_contexts = 4
+ [(google.api.field_behavior) = OPTIONAL];
// Optional. If 'true', adds punctuation to recognition result hypotheses.
// This feature is only available in select languages. Setting this for
@@ -596,7 +663,8 @@ message SpeechTranscriptionConfig {
// does not add punctuation to result hypotheses. NOTE: "This is currently
// offered as an experimental service, complimentary to all users. In the
// future this may be exclusively available as a premium feature."
- bool enable_automatic_punctuation = 5 [(google.api.field_behavior) = OPTIONAL];
+ bool enable_automatic_punctuation = 5
+ [(google.api.field_behavior) = OPTIONAL];
// Optional. For file formats, such as MXF or MKV, supporting multiple audio
// tracks, specify up to two tracks. Default: track 0.
@@ -606,14 +674,14 @@ message SpeechTranscriptionConfig {
// the top alternative of the recognition result using a speaker_tag provided
// in the WordInfo.
// Note: When this is true, we send all the words from the beginning of the
- // audio for the top alternative in every consecutive responses.
+ // audio for the top alternative in every consecutive response.
// This is done in order to improve our speaker tags as our models learn to
// identify the speakers in the conversation over time.
bool enable_speaker_diarization = 7 [(google.api.field_behavior) = OPTIONAL];
- // Optional. If set, specifies the estimated number of speakers in the conversation.
- // If not set, defaults to '2'.
- // Ignored unless enable_speaker_diarization is set to true.
+ // Optional. If set, specifies the estimated number of speakers in the
+ // conversation. If not set, defaults to '2'. Ignored unless
+ // enable_speaker_diarization is set to true.
int32 diarization_speaker_count = 8 [(google.api.field_behavior) = OPTIONAL];
// Optional. If `true`, the top result includes a list of words and the
@@ -642,9 +710,9 @@ message SpeechTranscription {
// ranked by the recognizer.
repeated SpeechRecognitionAlternative alternatives = 1;
- // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
- // the language in this result. This language code was detected to have the
- // most likelihood of being spoken in the audio.
+ // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+ // language tag of the language in this result. This language code was
+ // detected to have the most likelihood of being spoken in the audio.
string language_code = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}
@@ -662,8 +730,8 @@ message SpeechRecognitionAlternative {
float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. A list of word-specific information for each recognized word.
- // Note: When `enable_speaker_diarization` is true, you will see all the words
- // from the beginning of the audio.
+ // Note: When `enable_speaker_diarization` is set to true, you will see all
+ // the words from the beginning of the audio.
repeated WordInfo words = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
}
@@ -767,6 +835,9 @@ message TextAnnotation {
// All video segments where OCR detected text appears.
repeated TextSegment segments = 2;
+
+ // Feature version.
+ string version = 3;
}
// Video frame level annotations for object detection and tracking. This field
@@ -808,6 +879,9 @@ message ObjectTrackingAnnotation {
// messages in frames.
// Streaming mode: it can only be one ObjectTrackingFrame message in frames.
repeated ObjectTrackingFrame frames = 2;
+
+ // Feature version.
+ string version = 6;
}
// Annotation corresponding to one detected, tracked and recognized logo class.
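
Wired together, the new messages let a request opt into person detection through ``VideoContext``. A sketch using the Python types (assuming ``PersonDetectionConfig`` is re-exported through ``types`` like the other config messages):

```python
from google.cloud import videointelligence_v1 as videointelligence

client = videointelligence.VideoIntelligenceServiceClient()

# Pose landmarks and attributes are ignored unless bounding boxes are
# also requested, per the PersonDetectionConfig comments above.
context = videointelligence.types.VideoContext(
    person_detection_config=videointelligence.types.PersonDetectionConfig(
        include_bounding_boxes=True,
        include_pose_landmarks=True,
        include_attributes=True,
    )
)
operation = client.annotate_video(
    features=[videointelligence.enums.Feature.PERSON_DETECTION],
    input_uri="gs://example-bucket/video.mp4",  # placeholder URI
    video_context=context,
)
results = operation.result(timeout=600).annotation_results[0]
for annotation in results.person_detection_annotations:
    print(len(annotation.tracks), "track(s) detected")
```
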
diff --git a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py
index 71997f19..7912c6ee 100644
--- a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py
+++ b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/videointelligence_v1/proto/video_intelligence.proto
-
+"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
@@ -30,7 +30,7 @@
syntax="proto3",
serialized_options=b"\n%com.google.cloud.videointelligence.v1B\035VideoIntelligenceServiceProtoP\001ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\252\002!Google.Cloud.VideoIntelligence.V1\312\002!Google\\Cloud\\VideoIntelligence\\V1\352\002$Google::Cloud::VideoIntelligence::V1",
create_key=_descriptor._internal_create_key,
- serialized_pb=b'\n@google/cloud/videointelligence_v1/proto/video_intelligence.proto\x12!google.cloud.videointelligence.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xfe\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x41\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32*.google.cloud.videointelligence.v1.FeatureB\x03\xe0\x41\x02\x12\x46\n\rvideo_context\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xe6\x05\n\x0cVideoContext\x12\x41\n\x08segments\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12W\n\x16label_detection_config\x18\x02 \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.LabelDetectionConfig\x12\x62\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ShotChangeDetectionConfig\x12l\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1.ExplicitContentDetectionConfig\x12U\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.FaceDetectionConfig\x12\x61\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32<.google.cloud.videointelligence.v1.SpeechTranscriptionConfig\x12U\n\x15text_detection_config\x18\x08 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.TextDetectionConfig\x12W\n\x16object_tracking_config\x18\r \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.ObjectTrackingConfig"\xdd\x01\n\x14LabelDetectionConfig\x12S\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32\x35.google.cloud.videointelligence.v1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"D\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"d\n\x0cLabelSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\x94\x02\n\x0fLabelAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x44\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.LabelSegment\x12=\n\x06\x66rames\x18\x04 
\x03(\x0b\x32-.google.cloud.videointelligence.v1.LabelFrame"\x95\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32-.google.cloud.videointelligence.v1.Likelihood"d\n\x19\x45xplicitContentAnnotation\x12G\n\x06\x66rames\x18\x01 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"O\n\x0b\x46\x61\x63\x65Segment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"\x98\x01\n\tFaceFrame\x12[\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xa3\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.FaceSegment\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.FaceFrame"\xba\x02\n\x11TimestampedObject\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12K\n\tlandmarks\x18\x04 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.DetectedLandmarkB\x03\xe0\x41\x01"\x84\x02\n\x05Track\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Q\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.TimestampedObject\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"x\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x42\n\x05point\x18\x02 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"\xa1\t\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12@\n\x07segment\x18\n \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12U\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12^\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12R\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12[\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12S\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12K\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.FaceAnnotation\x12I\n\x10shot_annotations\x18\x06 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Y\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ExplicitContentAnnotation\x12U\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.SpeechTranscription\x12K\n\x10text_annotations\x18\x0c 
\x03(\x0b\x32\x31.google.cloud.videointelligence.v1.TextAnnotation\x12W\n\x12object_annotations\x18\x0e \x03(\x0b\x32;.google.cloud.videointelligence.v1.ObjectTrackingAnnotation\x12\x62\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32<.google.cloud.videointelligence.v1.LogoRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"n\n\x15\x41nnotateVideoResponse\x12U\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1.VideoAnnotationResults"\xa6\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12;\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12@\n\x07segment\x18\x06 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"p\n\x15\x41nnotateVideoProgress\x12W\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1.VideoAnnotationProgress"\x81\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12N\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x88\x01\n\x13SpeechTranscription\x12U\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x8c\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12?\n\x05words\x18\x03 \x03(\x0b\x32+.google.cloud.videointelligence.v1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"_\n\x16NormalizedBoundingPoly\x12\x45\n\x08vertices\x18\x01 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex"\xa1\x01\n\x0bTextSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.TextFrame"\x94\x01\n\tTextFrame\x12W\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32\x39.google.cloud.videointelligence.v1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"`\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12@\n\x08segments\x18\x02 
\x03(\x0b\x32..google.cloud.videointelligence.v1.TextSegment"\xa0\x01\n\x13ObjectTrackingFrame\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\x97\x02\n\x18ObjectTrackingAnnotation\x12\x42\n\x07segment\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x46\n\x06\x66rames\x18\x02 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.ObjectTrackingFrameB\x0c\n\ntrack_info"\xd3\x01\n\x19LogoRecognitionAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x38\n\x06tracks\x18\x02 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment*\xdf\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc0\x02\n\x18VideoIntelligenceService\x12\xcd\x01\n\rAnnotateVideo\x12\x37.google.cloud.videointelligence.v1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"d\x82\xd3\xe4\x93\x02\x18"\x13/v1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x8b\x02\n%com.google.cloud.videointelligence.v1B\x1dVideoIntelligenceServiceProtoP\x01ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\xaa\x02!Google.Cloud.VideoIntelligence.V1\xca\x02!Google\\Cloud\\VideoIntelligence\\V1\xea\x02$Google::Cloud::VideoIntelligence::V1b\x06proto3',
+ serialized_pb=b'\n@google/cloud/videointelligence_v1/proto/video_intelligence.proto\x12!google.cloud.videointelligence.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xfe\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x41\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32*.google.cloud.videointelligence.v1.FeatureB\x03\xe0\x41\x02\x12\x46\n\rvideo_context\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xc1\x06\n\x0cVideoContext\x12\x41\n\x08segments\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12W\n\x16label_detection_config\x18\x02 \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.LabelDetectionConfig\x12\x62\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ShotChangeDetectionConfig\x12l\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1.ExplicitContentDetectionConfig\x12U\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.FaceDetectionConfig\x12\x61\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32<.google.cloud.videointelligence.v1.SpeechTranscriptionConfig\x12U\n\x15text_detection_config\x18\x08 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.TextDetectionConfig\x12Y\n\x17person_detection_config\x18\x0b \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.PersonDetectionConfig\x12W\n\x16object_tracking_config\x18\r \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.ObjectTrackingConfig"\xdd\x01\n\x14LabelDetectionConfig\x12S\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32\x35.google.cloud.videointelligence.v1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"d\n\x0cLabelSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 
\x01(\t"\xa5\x02\n\x0fLabelAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x44\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.LabelSegment\x12=\n\x06\x66rames\x18\x04 \x03(\x0b\x32-.google.cloud.videointelligence.v1.LabelFrame\x12\x0f\n\x07version\x18\x05 \x01(\t"\x95\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32-.google.cloud.videointelligence.v1.Likelihood"u\n\x19\x45xplicitContentAnnotation\x12G\n\x06\x66rames\x18\x01 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1.ExplicitContentFrame\x12\x0f\n\x07version\x18\x02 \x01(\t"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"*\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12\x0f\n\x07version\x18\x05 \x01(\t"f\n\x19PersonDetectionAnnotation\x12\x38\n\x06tracks\x18\x01 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x0f\n\x07version\x18\x02 \x01(\t"O\n\x0b\x46\x61\x63\x65Segment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"\x9c\x01\n\tFaceFrame\x12[\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration:\x02\x18\x01"\xa7\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.FaceSegment\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.FaceFrame:\x02\x18\x01"\xba\x02\n\x11TimestampedObject\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12K\n\tlandmarks\x18\x04 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.DetectedLandmarkB\x03\xe0\x41\x01"\x84\x02\n\x05Track\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Q\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.TimestampedObject\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"x\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x42\n\x05point\x18\x02 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"\xe9\n\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12@\n\x07segment\x18\n \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12U\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12^\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12R\n\x16shot_label_annotations\x18\x03 
\x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12[\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12S\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12O\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.FaceAnnotationB\x02\x18\x01\x12^\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32:.google.cloud.videointelligence.v1.FaceDetectionAnnotation\x12I\n\x10shot_annotations\x18\x06 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Y\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ExplicitContentAnnotation\x12U\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.SpeechTranscription\x12K\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.TextAnnotation\x12W\n\x12object_annotations\x18\x0e \x03(\x0b\x32;.google.cloud.videointelligence.v1.ObjectTrackingAnnotation\x12\x62\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32<.google.cloud.videointelligence.v1.LogoRecognitionAnnotation\x12\x62\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32<.google.cloud.videointelligence.v1.PersonDetectionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"n\n\x15\x41nnotateVideoResponse\x12U\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1.VideoAnnotationResults"\xa6\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12;\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12@\n\x07segment\x18\x06 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"p\n\x15\x41nnotateVideoProgress\x12W\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1.VideoAnnotationProgress"\x81\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12N\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x88\x01\n\x13SpeechTranscription\x12U\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x8c\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12?\n\x05words\x18\x03 \x03(\x0b\x32+.google.cloud.videointelligence.v1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"_\n\x16NormalizedBoundingPoly\x12\x45\n\x08vertices\x18\x01 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex"\xa1\x01\n\x0bTextSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.TextFrame"\x94\x01\n\tTextFrame\x12W\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32\x39.google.cloud.videointelligence.v1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"q\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.TextSegment\x12\x0f\n\x07version\x18\x03 \x01(\t"\xa0\x01\n\x13ObjectTrackingFrame\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xa8\x02\n\x18ObjectTrackingAnnotation\x12\x42\n\x07segment\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x46\n\x06\x66rames\x18\x02 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.ObjectTrackingFrame\x12\x0f\n\x07version\x18\x06 \x01(\tB\x0c\n\ntrack_info"\xd3\x01\n\x19LogoRecognitionAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x38\n\x06tracks\x18\x02 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment*\xf5\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x14\n\x10PERSON_DETECTION\x10\x0e*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc0\x02\n\x18VideoIntelligenceService\x12\xcd\x01\n\rAnnotateVideo\x12\x37.google.cloud.videointelligence.v1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"d\x82\xd3\xe4\x93\x02\x18"\x13/v1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 
videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x8b\x02\n%com.google.cloud.videointelligence.v1B\x1dVideoIntelligenceServiceProtoP\x01ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\xaa\x02!Google.Cloud.VideoIntelligence.V1\xca\x02!Google\\Cloud\\VideoIntelligence\\V1\xea\x02$Google::Cloud::VideoIntelligence::V1b\x06proto3',
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_api_dot_client__pb2.DESCRIPTOR,
@@ -121,11 +121,19 @@
type=None,
create_key=_descriptor._internal_create_key,
),
+ _descriptor.EnumValueDescriptor(
+ name="PERSON_DETECTION",
+ index=9,
+ number=14,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key,
+ ),
],
containing_type=None,
serialized_options=None,
- serialized_start=7779,
- serialized_end=8002,
+ serialized_start=8439,
+ serialized_end=8684,
)
_sym_db.RegisterEnumDescriptor(_FEATURE)
@@ -172,8 +180,8 @@
],
containing_type=None,
serialized_options=None,
- serialized_start=8004,
- serialized_end=8118,
+ serialized_start=8686,
+ serialized_end=8800,
)
_sym_db.RegisterEnumDescriptor(_LABELDETECTIONMODE)
@@ -236,8 +244,8 @@
],
containing_type=None,
serialized_options=None,
- serialized_start=8120,
- serialized_end=8236,
+ serialized_start=8802,
+ serialized_end=8918,
)
_sym_db.RegisterEnumDescriptor(_LIKELIHOOD)
@@ -251,6 +259,7 @@
TEXT_DETECTION = 7
OBJECT_TRACKING = 9
LOGO_RECOGNITION = 12
+PERSON_DETECTION = 14
LABEL_DETECTION_MODE_UNSPECIFIED = 0
SHOT_MODE = 1
FRAME_MODE = 2
@@ -540,10 +549,29 @@
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
+ _descriptor.FieldDescriptor(
+ name="person_detection_config",
+ full_name="google.cloud.videointelligence.v1.VideoContext.person_detection_config",
+ index=7,
+ number=11,
+ type=11,
+ cpp_type=10,
+ label=1,
+ has_default_value=False,
+ default_value=None,
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
_descriptor.FieldDescriptor(
name="object_tracking_config",
full_name="google.cloud.videointelligence.v1.VideoContext.object_tracking_config",
- index=7,
+ index=8,
number=13,
type=11,
cpp_type=10,
@@ -569,7 +597,7 @@
extension_ranges=[],
oneofs=[],
serialized_start=576,
- serialized_end=1318,
+ serialized_end=1409,
)
@@ -685,8 +713,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=1321,
- serialized_end=1542,
+ serialized_start=1412,
+ serialized_end=1633,
)
@@ -726,8 +754,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=1544,
- serialized_end=1586,
+ serialized_start=1635,
+ serialized_end=1677,
)
@@ -767,8 +795,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=1588,
- serialized_end=1625,
+ serialized_start=1679,
+ serialized_end=1716,
)
@@ -818,6 +846,104 @@
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
+ _descriptor.FieldDescriptor(
+ name="include_attributes",
+ full_name="google.cloud.videointelligence.v1.FaceDetectionConfig.include_attributes",
+ index=2,
+ number=5,
+ type=8,
+ cpp_type=7,
+ label=1,
+ has_default_value=False,
+ default_value=False,
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
+ ],
+ extensions=[],
+ nested_types=[],
+ enum_types=[],
+ serialized_options=None,
+ is_extendable=False,
+ syntax="proto3",
+ extension_ranges=[],
+ oneofs=[],
+ serialized_start=1718,
+ serialized_end=1814,
+)
+
+
+_PERSONDETECTIONCONFIG = _descriptor.Descriptor(
+ name="PersonDetectionConfig",
+ full_name="google.cloud.videointelligence.v1.PersonDetectionConfig",
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name="include_bounding_boxes",
+ full_name="google.cloud.videointelligence.v1.PersonDetectionConfig.include_bounding_boxes",
+ index=0,
+ number=1,
+ type=8,
+ cpp_type=7,
+ label=1,
+ has_default_value=False,
+ default_value=False,
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
+ _descriptor.FieldDescriptor(
+ name="include_pose_landmarks",
+ full_name="google.cloud.videointelligence.v1.PersonDetectionConfig.include_pose_landmarks",
+ index=1,
+ number=2,
+ type=8,
+ cpp_type=7,
+ label=1,
+ has_default_value=False,
+ default_value=False,
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
+ _descriptor.FieldDescriptor(
+ name="include_attributes",
+ full_name="google.cloud.videointelligence.v1.PersonDetectionConfig.include_attributes",
+ index=2,
+ number=3,
+ type=8,
+ cpp_type=7,
+ label=1,
+ has_default_value=False,
+ default_value=False,
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
],
extensions=[],
nested_types=[],
@@ -827,8 +953,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=1627,
- serialized_end=1695,
+ serialized_start=1816,
+ serialized_end=1931,
)
@@ -868,8 +994,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=1697,
- serialized_end=1744,
+ serialized_start=1933,
+ serialized_end=1980,
)
@@ -928,8 +1054,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=1746,
- serialized_end=1806,
+ serialized_start=1982,
+ serialized_end=2042,
)
@@ -988,8 +1114,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=1808,
- serialized_end=1928,
+ serialized_start=2044,
+ serialized_end=2164,
)
@@ -1048,8 +1174,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=1930,
- serialized_end=2030,
+ serialized_start=2166,
+ serialized_end=2266,
)
@@ -1108,8 +1234,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=2032,
- serialized_end=2112,
+ serialized_start=2268,
+ serialized_end=2348,
)
@@ -1187,8 +1313,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=2114,
- serialized_end=2185,
+ serialized_start=2350,
+ serialized_end=2421,
)
@@ -1276,6 +1402,25 @@
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
+ _descriptor.FieldDescriptor(
+ name="version",
+ full_name="google.cloud.videointelligence.v1.LabelAnnotation.version",
+ index=4,
+ number=5,
+ type=9,
+ cpp_type=9,
+ label=1,
+ has_default_value=False,
+ default_value=b"".decode("utf-8"),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
],
extensions=[],
nested_types=[],
@@ -1285,8 +1430,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=2188,
- serialized_end=2464,
+ serialized_start=2424,
+ serialized_end=2717,
)
@@ -1345,8 +1490,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=2467,
- serialized_end=2616,
+ serialized_start=2720,
+ serialized_end=2869,
)
@@ -1377,6 +1522,25 @@
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
+ _descriptor.FieldDescriptor(
+ name="version",
+ full_name="google.cloud.videointelligence.v1.ExplicitContentAnnotation.version",
+ index=1,
+ number=2,
+ type=9,
+ cpp_type=9,
+ label=1,
+ has_default_value=False,
+ default_value=b"".decode("utf-8"),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
],
extensions=[],
nested_types=[],
@@ -1386,8 +1550,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=2618,
- serialized_end=2718,
+ serialized_start=2871,
+ serialized_end=2988,
)
@@ -1484,8 +1648,109 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=2720,
- serialized_end=2801,
+ serialized_start=2990,
+ serialized_end=3071,
+)
+
+
+_FACEDETECTIONANNOTATION = _descriptor.Descriptor(
+ name="FaceDetectionAnnotation",
+ full_name="google.cloud.videointelligence.v1.FaceDetectionAnnotation",
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name="version",
+ full_name="google.cloud.videointelligence.v1.FaceDetectionAnnotation.version",
+ index=0,
+ number=5,
+ type=9,
+ cpp_type=9,
+ label=1,
+ has_default_value=False,
+ default_value=b"".decode("utf-8"),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
+ ],
+ extensions=[],
+ nested_types=[],
+ enum_types=[],
+ serialized_options=None,
+ is_extendable=False,
+ syntax="proto3",
+ extension_ranges=[],
+ oneofs=[],
+ serialized_start=3073,
+ serialized_end=3115,
+)
+
+
+_PERSONDETECTIONANNOTATION = _descriptor.Descriptor(
+ name="PersonDetectionAnnotation",
+ full_name="google.cloud.videointelligence.v1.PersonDetectionAnnotation",
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name="tracks",
+ full_name="google.cloud.videointelligence.v1.PersonDetectionAnnotation.tracks",
+ index=0,
+ number=1,
+ type=11,
+ cpp_type=10,
+ label=3,
+ has_default_value=False,
+ default_value=[],
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
+ _descriptor.FieldDescriptor(
+ name="version",
+ full_name="google.cloud.videointelligence.v1.PersonDetectionAnnotation.version",
+ index=1,
+ number=2,
+ type=9,
+ cpp_type=9,
+ label=1,
+ has_default_value=False,
+ default_value=b"".decode("utf-8"),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
+ ],
+ extensions=[],
+ nested_types=[],
+ enum_types=[],
+ serialized_options=None,
+ is_extendable=False,
+ syntax="proto3",
+ extension_ranges=[],
+ oneofs=[],
+ serialized_start=3117,
+ serialized_end=3219,
)
@@ -1525,8 +1790,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=2803,
- serialized_end=2882,
+ serialized_start=3221,
+ serialized_end=3300,
)
@@ -1580,13 +1845,13 @@
extensions=[],
nested_types=[],
enum_types=[],
- serialized_options=None,
+ serialized_options=b"\030\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=2885,
- serialized_end=3037,
+ serialized_start=3303,
+ serialized_end=3459,
)
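
The serialized_options=b"\030\001" added here for _FACEFRAME (and for
_FACEANNOTATION in the next hunk) is the wire encoding of
``option deprecated = true;``: field 3 of google.protobuf.MessageOptions
(``deprecated``), varint wire type, value 1. The same two bytes later mark the
``face_annotations`` field deprecated, since FieldOptions.deprecated is also
field 3. A two-line decode, assuming only the protobuf wire format:

    opts = b"\030\001"  # tag 0o30 == 24 == (field 3 << 3) | wire type 0
    assert opts[0] >> 3 == 3 and opts[0] & 0x7 == 0 and opts[1] == 1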
@@ -1659,13 +1924,13 @@
extensions=[],
nested_types=[],
enum_types=[],
- serialized_options=None,
+ serialized_options=b"\030\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=3040,
- serialized_end=3203,
+ serialized_start=3462,
+ serialized_end=3629,
)
@@ -1762,8 +2027,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=3206,
- serialized_end=3520,
+ serialized_start=3632,
+ serialized_end=3946,
)
@@ -1860,8 +2125,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=3523,
- serialized_end=3783,
+ serialized_start=3949,
+ serialized_end=4209,
)
@@ -1939,8 +2204,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=3785,
- serialized_end=3853,
+ serialized_start=4211,
+ serialized_end=4279,
)
@@ -2018,8 +2283,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=3855,
- serialized_end=3975,
+ serialized_start=4281,
+ serialized_end=4401,
)
@@ -2179,6 +2444,25 @@
containing_type=None,
is_extension=False,
extension_scope=None,
+ serialized_options=b"\030\001",
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
+ _descriptor.FieldDescriptor(
+ name="face_detection_annotations",
+ full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.face_detection_annotations",
+ index=8,
+ number=13,
+ type=11,
+ cpp_type=10,
+ label=3,
+ has_default_value=False,
+ default_value=[],
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
@@ -2186,7 +2470,7 @@
_descriptor.FieldDescriptor(
name="shot_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.shot_annotations",
- index=8,
+ index=9,
number=6,
type=11,
cpp_type=10,
@@ -2205,7 +2489,7 @@
_descriptor.FieldDescriptor(
name="explicit_annotation",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.explicit_annotation",
- index=9,
+ index=10,
number=7,
type=11,
cpp_type=10,
@@ -2224,7 +2508,7 @@
_descriptor.FieldDescriptor(
name="speech_transcriptions",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.speech_transcriptions",
- index=10,
+ index=11,
number=11,
type=11,
cpp_type=10,
@@ -2243,7 +2527,7 @@
_descriptor.FieldDescriptor(
name="text_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.text_annotations",
- index=11,
+ index=12,
number=12,
type=11,
cpp_type=10,
@@ -2262,7 +2546,7 @@
_descriptor.FieldDescriptor(
name="object_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.object_annotations",
- index=12,
+ index=13,
number=14,
type=11,
cpp_type=10,
@@ -2281,7 +2565,7 @@
_descriptor.FieldDescriptor(
name="logo_recognition_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.logo_recognition_annotations",
- index=13,
+ index=14,
number=19,
type=11,
cpp_type=10,
@@ -2297,10 +2581,29 @@
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
+ _descriptor.FieldDescriptor(
+ name="person_detection_annotations",
+ full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.person_detection_annotations",
+ index=15,
+ number=20,
+ type=11,
+ cpp_type=10,
+ label=3,
+ has_default_value=False,
+ default_value=[],
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
_descriptor.FieldDescriptor(
name="error",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.error",
- index=14,
+ index=16,
number=9,
type=11,
cpp_type=10,
@@ -2325,8 +2628,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=3978,
- serialized_end=5163,
+ serialized_start=4404,
+ serialized_end=5789,
)
@@ -2366,8 +2669,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=5165,
- serialized_end=5275,
+ serialized_start=5791,
+ serialized_end=5901,
)
@@ -2502,8 +2805,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=5278,
- serialized_end=5572,
+ serialized_start=5904,
+ serialized_end=6198,
)
@@ -2543,8 +2846,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=5574,
- serialized_end=5686,
+ serialized_start=6200,
+ serialized_end=6312,
)
@@ -2736,8 +3039,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=5689,
- serialized_end=6074,
+ serialized_start=6315,
+ serialized_end=6700,
)
@@ -2777,8 +3080,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=6076,
- serialized_end=6113,
+ serialized_start=6702,
+ serialized_end=6739,
)
@@ -2837,8 +3140,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=6116,
- serialized_end=6252,
+ serialized_start=6742,
+ serialized_end=6878,
)
@@ -2916,8 +3219,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=6255,
- serialized_end=6395,
+ serialized_start=6881,
+ serialized_end=7021,
)
@@ -3033,8 +3336,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=6398,
- serialized_end=6565,
+ serialized_start=7024,
+ serialized_end=7191,
)
@@ -3093,8 +3396,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=6567,
- serialized_end=6607,
+ serialized_start=7193,
+ serialized_end=7233,
)
@@ -3134,8 +3437,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=6609,
- serialized_end=6704,
+ serialized_start=7235,
+ serialized_end=7330,
)
@@ -3213,8 +3516,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=6707,
- serialized_end=6868,
+ serialized_start=7333,
+ serialized_end=7494,
)
@@ -3273,8 +3576,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=6871,
- serialized_end=7019,
+ serialized_start=7497,
+ serialized_end=7645,
)
@@ -3324,6 +3627,25 @@
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
+ _descriptor.FieldDescriptor(
+ name="version",
+ full_name="google.cloud.videointelligence.v1.TextAnnotation.version",
+ index=2,
+ number=3,
+ type=9,
+ cpp_type=9,
+ label=1,
+ has_default_value=False,
+ default_value=b"".decode("utf-8"),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
],
extensions=[],
nested_types=[],
@@ -3333,8 +3655,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=7021,
- serialized_end=7117,
+ serialized_start=7647,
+ serialized_end=7760,
)
@@ -3393,8 +3715,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=7120,
- serialized_end=7280,
+ serialized_start=7763,
+ serialized_end=7923,
)
@@ -3501,6 +3823,25 @@
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
+ _descriptor.FieldDescriptor(
+ name="version",
+ full_name="google.cloud.videointelligence.v1.ObjectTrackingAnnotation.version",
+ index=5,
+ number=6,
+ type=9,
+ cpp_type=9,
+ label=1,
+ has_default_value=False,
+ default_value=b"".decode("utf-8"),
+ message_type=None,
+ enum_type=None,
+ containing_type=None,
+ is_extension=False,
+ extension_scope=None,
+ serialized_options=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ ),
],
extensions=[],
nested_types=[],
@@ -3519,8 +3860,8 @@
fields=[],
),
],
- serialized_start=7283,
- serialized_end=7562,
+ serialized_start=7926,
+ serialized_end=8222,
)
@@ -3598,8 +3939,8 @@
syntax="proto3",
extension_ranges=[],
oneofs=[],
- serialized_start=7565,
- serialized_end=7776,
+ serialized_start=8225,
+ serialized_end=8436,
)
_ANNOTATEVIDEOREQUEST.fields_by_name["features"].enum_type = _FEATURE
@@ -3623,6 +3964,9 @@
_VIDEOCONTEXT.fields_by_name[
"text_detection_config"
].message_type = _TEXTDETECTIONCONFIG
+_VIDEOCONTEXT.fields_by_name[
+ "person_detection_config"
+].message_type = _PERSONDETECTIONCONFIG
_VIDEOCONTEXT.fields_by_name[
"object_tracking_config"
].message_type = _OBJECTTRACKINGCONFIG
@@ -3648,6 +3992,7 @@
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_EXPLICITCONTENTFRAME.fields_by_name["pornography_likelihood"].enum_type = _LIKELIHOOD
_EXPLICITCONTENTANNOTATION.fields_by_name["frames"].message_type = _EXPLICITCONTENTFRAME
+_PERSONDETECTIONANNOTATION.fields_by_name["tracks"].message_type = _TRACK
_FACESEGMENT.fields_by_name["segment"].message_type = _VIDEOSEGMENT
_FACEFRAME.fields_by_name[
"normalized_bounding_boxes"
@@ -3688,6 +4033,9 @@
_VIDEOANNOTATIONRESULTS.fields_by_name[
"face_annotations"
].message_type = _FACEANNOTATION
+_VIDEOANNOTATIONRESULTS.fields_by_name[
+ "face_detection_annotations"
+].message_type = _FACEDETECTIONANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name["shot_annotations"].message_type = _VIDEOSEGMENT
_VIDEOANNOTATIONRESULTS.fields_by_name[
"explicit_annotation"
@@ -3704,6 +4052,9 @@
_VIDEOANNOTATIONRESULTS.fields_by_name[
"logo_recognition_annotations"
].message_type = _LOGORECOGNITIONANNOTATION
+_VIDEOANNOTATIONRESULTS.fields_by_name[
+ "person_detection_annotations"
+].message_type = _PERSONDETECTIONANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"error"
].message_type = google_dot_rpc_dot_status__pb2._STATUS
@@ -3774,6 +4125,7 @@
] = _SHOTCHANGEDETECTIONCONFIG
DESCRIPTOR.message_types_by_name["ObjectTrackingConfig"] = _OBJECTTRACKINGCONFIG
DESCRIPTOR.message_types_by_name["FaceDetectionConfig"] = _FACEDETECTIONCONFIG
+DESCRIPTOR.message_types_by_name["PersonDetectionConfig"] = _PERSONDETECTIONCONFIG
DESCRIPTOR.message_types_by_name[
"ExplicitContentDetectionConfig"
] = _EXPLICITCONTENTDETECTIONCONFIG
@@ -3788,6 +4140,10 @@
"ExplicitContentAnnotation"
] = _EXPLICITCONTENTANNOTATION
DESCRIPTOR.message_types_by_name["NormalizedBoundingBox"] = _NORMALIZEDBOUNDINGBOX
+DESCRIPTOR.message_types_by_name["FaceDetectionAnnotation"] = _FACEDETECTIONANNOTATION
+DESCRIPTOR.message_types_by_name[
+ "PersonDetectionAnnotation"
+] = _PERSONDETECTIONANNOTATION
DESCRIPTOR.message_types_by_name["FaceSegment"] = _FACESEGMENT
DESCRIPTOR.message_types_by_name["FaceFrame"] = _FACEFRAME
DESCRIPTOR.message_types_by_name["FaceAnnotation"] = _FACEANNOTATION
@@ -3831,23 +4187,24 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video annotation request.
+
Attributes:
input_uri:
- Input video location. Currently, only `Google Cloud Storage
- `__ URIs are supported,
- which must be specified in the following format:
- ``gs://bucket-id/object-id`` (other URI formats return [google
- .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT])
- . For more information, see `Request URIs
+ Input video location. Currently, only `Cloud Storage
+ `__ URIs are supported.
+ URIs must be specified in the following format: ``gs://bucket-
+ id/object-id`` (other URI formats return [google.rpc.Code.INVA
+ LID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more
+ information, see `Request URIs
`__.
- A video URI may include wildcards in ``object-id``, and thus
- identify multiple videos. Supported wildcards: ’*’ to match 0
- or more characters; ‘?’ to match 1 character. If unset, the
- input video should be embedded in the request as
- ``input_content``. If set, ``input_content`` should be unset.
+ To identify multiple videos, a video URI may include wildcards
+ in the ``object-id``. Supported wildcards: ’*’ to match 0 or
+ more characters; ‘?’ to match 1 character. If unset, the input
+ video should be embedded in the request as ``input_content``.
+ If set, ``input_content`` must be unset.
input_content:
The video data bytes. If unset, the input video(s) should be
- specified via ``input_uri``. If set, ``input_uri`` should be
+ specified via the ``input_uri``. If set, ``input_uri`` must be
unset.
features:
Required. Requested video annotation features.
@@ -3855,18 +4212,18 @@
Additional video context and/or feature-specific parameters.
output_uri:
Optional. Location where the output (in JSON format) should be
- stored. Currently, only `Google Cloud Storage
- `__ URIs are supported,
- which must be specified in the following format:
+ stored. Currently, only `Cloud Storage
+ `__ URIs are supported.
+ These must be specified in the following format:
``gs://bucket-id/object-id`` (other URI formats return [google
.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT])
. For more information, see `Request URIs
`__.
location_id:
Optional. Cloud region where annotation should take place.
- Supported cloud regions: ``us-east1``, ``us-west1``, ``europe-
- west1``, ``asia-east1``. If no region is specified, a region
- will be determined based on video file location.
+ Supported cloud regions are: ``us-east1``, ``us-west1``,
+ ``europe-west1``, ``asia-east1``. If no region is specified,
+ the region will be determined based on video file location.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.AnnotateVideoRequest)
},
@@ -3881,6 +4238,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video context and/or feature-specific parameters.
+
Attributes:
segments:
Video segments to annotate. The segments may overlap and are
@@ -3898,6 +4256,8 @@
Config for SPEECH_TRANSCRIPTION.
text_detection_config:
Config for TEXT_DETECTION.
+ person_detection_config:
+ Config for PERSON_DETECTION.
object_tracking_config:
Config for OBJECT_TRACKING.
""",
@@ -3914,13 +4274,14 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for LABEL_DETECTION.
+
Attributes:
label_detection_mode:
What labels should be detected with LABEL_DETECTION, in
addition to video-level labels or segment-level labels. If
unspecified, defaults to ``SHOT_MODE``.
stationary_camera:
- Whether the video has been shot from a stationary (i.e. non-
+ Whether the video has been shot from a stationary (i.e., non-
moving) camera. When set to true, might improve detection
accuracy for moving objects. Should be used with
``SHOT_AND_FRAME_MODE`` enabled.
@@ -3931,18 +4292,17 @@
The confidence threshold we perform filtering on the labels
from frame-level detection. If not set, it is set to 0.4 by
default. The valid range for this threshold is [0.1, 0.9]. Any
- value set outside of this range will be clipped. Note: for
- best results please follow the default threshold. We will
- update the default threshold everytime when we release a new
- model.
+ value set outside of this range will be clipped. Note: For
+ best results, follow the default threshold. We will update
+ the default threshold every time we release a new model.
video_confidence_threshold:
The confidence threshold we perform filtering on the labels
- from video-level and shot-level detections. If not set, it is
+ from video-level and shot-level detections. If not set, it’s
set to 0.3 by default. The valid range for this threshold is
[0.1, 0.9]. Any value set outside of this range will be
- clipped. Note: for best results please follow the default
- threshold. We will update the default threshold everytime when
- we release a new model.
+ clipped. Note: For best results, follow the default threshold.
+ We will update the default threshold every time we release a
+ new model.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LabelDetectionConfig)
},
@@ -3957,6 +4317,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for SHOT_CHANGE_DETECTION.
+
Attributes:
model:
Model to use for shot change detection. Supported values:
@@ -3975,6 +4336,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for OBJECT_TRACKING.
+
Attributes:
model:
Model to use for object tracking. Supported values:
@@ -3993,19 +4355,51 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for FACE_DETECTION.
+
Attributes:
model:
Model to use for face detection. Supported values:
“builtin/stable” (the default if unset) and “builtin/latest”.
include_bounding_boxes:
- Whether bounding boxes be included in the face annotation
+ Whether bounding boxes are included in the face annotation
output.
+ include_attributes:
+ Whether to enable face attributes detection, such as glasses,
+ dark_glasses, mouth_open, etc. Ignored if
+ ‘include_bounding_boxes’ is set to false.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceDetectionConfig)
},
)
_sym_db.RegisterMessage(FaceDetectionConfig)
+PersonDetectionConfig = _reflection.GeneratedProtocolMessageType(
+ "PersonDetectionConfig",
+ (_message.Message,),
+ {
+ "DESCRIPTOR": _PERSONDETECTIONCONFIG,
+ "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
+ "__doc__": """Config for PERSON_DETECTION.
+
+ Attributes:
+ include_bounding_boxes:
+ Whether bounding boxes are included in the person detection
+ annotation output.
+ include_pose_landmarks:
+ Whether to enable pose landmarks detection. Ignored if
+ ‘include_bounding_boxes’ is set to false.
+ include_attributes:
+ Whether to enable person attributes detection, such as
+ clothing color (black, blue, etc.), type (coat, dress, etc.),
+ pattern (plain, floral, etc.), hair, etc. Ignored if
+ ‘include_bounding_boxes’ is set to false.
+ """,
+ # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.PersonDetectionConfig)
+ },
+)
+_sym_db.RegisterMessage(PersonDetectionConfig)
+
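
With PersonDetectionConfig registered, the new knobs thread through
VideoContext like the existing per-feature configs. A hedged sketch using the
types this package exports (field names as defined in the descriptors above):

    from google.cloud import videointelligence_v1

    context = videointelligence_v1.types.VideoContext(
        person_detection_config=videointelligence_v1.types.PersonDetectionConfig(
            include_bounding_boxes=True,   # field 1
            include_pose_landmarks=True,   # field 2; ignored if boxes are off
            include_attributes=False,      # field 3
        )
    )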
ExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType(
"ExplicitContentDetectionConfig",
(_message.Message,),
@@ -4014,6 +4408,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for EXPLICIT_CONTENT_DETECTION.
+
Attributes:
model:
Model to use for explicit content detection. Supported values:
@@ -4032,6 +4427,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for TEXT_DETECTION.
+
Attributes:
language_hints:
Language hint can be specified if the language to be detected
@@ -4056,6 +4452,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video segment.
+
Attributes:
start_time_offset:
Time-offset, relative to the beginning of the video,
@@ -4077,6 +4474,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for label detection.
+
Attributes:
segment:
Video segment where a label was detected.
@@ -4096,6 +4494,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for label detection.
+
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
@@ -4116,13 +4515,14 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Detected entity from video analysis.
+
Attributes:
entity_id:
Opaque entity ID. Some IDs may be available in `Google
Knowledge Graph Search API
`__.
description:
- Textual description, e.g. ``Fixed-gear bicycle``.
+ Textual description, e.g., ``Fixed-gear bicycle``.
language_code:
Language code for ``description`` in BCP-47 format.
""",
@@ -4139,18 +4539,21 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Label annotation.
+
Attributes:
entity:
Detected entity.
category_entities:
- Common categories for the detected entity. E.g. when the label
- is ``Terrier`` the category is likely ``dog``. And in some
- cases there might be more than one categories e.g. ``Terrier``
- could also be a ``pet``.
+ Common categories for the detected entity. For example, when
+ the label is ``Terrier``, the category is likely ``dog``. In
+ some cases there might be more than one category; for
+ example, ``Terrier`` could also be a ``pet``.
segments:
All video segments where a label was detected.
frames:
All video frames where a label was detected.
+ version:
+ Feature version.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LabelAnnotation)
},
@@ -4165,6 +4568,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for explicit content.
+
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
@@ -4187,9 +4591,12 @@
If no explicit content has been detected in a frame, no annotations
are present for that frame.
+
Attributes:
frames:
All video frames where explicit content was detected.
+ version:
+ Feature version.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ExplicitContentAnnotation)
},
@@ -4205,6 +4612,7 @@
"__doc__": """Normalized bounding box. The normalized vertex coordinates are
relative to the original image. Range: [0, 1].
+
Attributes:
left:
Left X coordinate.
@@ -4220,6 +4628,44 @@
)
_sym_db.RegisterMessage(NormalizedBoundingBox)
+FaceDetectionAnnotation = _reflection.GeneratedProtocolMessageType(
+ "FaceDetectionAnnotation",
+ (_message.Message,),
+ {
+ "DESCRIPTOR": _FACEDETECTIONANNOTATION,
+ "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
+ "__doc__": """Face detection annotation.
+
+ Attributes:
+ version:
+ Feature version.
+ """,
+ # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceDetectionAnnotation)
+ },
+)
+_sym_db.RegisterMessage(FaceDetectionAnnotation)
+
+PersonDetectionAnnotation = _reflection.GeneratedProtocolMessageType(
+ "PersonDetectionAnnotation",
+ (_message.Message,),
+ {
+ "DESCRIPTOR": _PERSONDETECTIONANNOTATION,
+ "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
+ "__doc__": """Person detection annotation per video.
+
+ Attributes:
+ tracks:
+ The detected tracks of a person.
+ version:
+ Feature version.
+ """,
+ # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.PersonDetectionAnnotation)
+ },
+)
+_sym_db.RegisterMessage(PersonDetectionAnnotation)
+
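
On the response side the new message hangs off VideoAnnotationResults as
person_detection_annotations (field 20, wired up near the end of this file).
A sketch of walking the result, assuming ``operation`` is the long-running
operation returned by annotate_video:

    result = operation.result(timeout=600)
    for annotation in result.annotation_results[0].person_detection_annotations:
        for track in annotation.tracks:
            for obj in track.timestamped_objects:
                box = obj.normalized_bounding_box  # left/top/right/bottom in [0, 1]
                print(obj.time_offset, box.left, box.top)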
FaceSegment = _reflection.GeneratedProtocolMessageType(
"FaceSegment",
(_message.Message,),
@@ -4228,6 +4674,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for face detection.
+
Attributes:
segment:
Video segment where a face was detected.
@@ -4243,8 +4690,9 @@
{
"DESCRIPTOR": _FACEFRAME,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
- "__doc__": """Video frame level annotation results for face detection.
+ "__doc__": """Deprecated. No effect.
+
Attributes:
normalized_bounding_boxes:
Normalized Bounding boxes in a frame. There can be more than
@@ -4265,8 +4713,9 @@
{
"DESCRIPTOR": _FACEANNOTATION,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
- "__doc__": """Face annotation.
+ "__doc__": """Deprecated. No effect.
+
Attributes:
thumbnail:
Thumbnail of a representative face view (in JPEG format).
@@ -4289,6 +4738,7 @@
"__doc__": """For tracking related features. An object at time_offset with
attributes, and located with normalized_bounding_box.
+
Attributes:
normalized_bounding_box:
Normalized Bounding box in a frame, where the object is
@@ -4314,6 +4764,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """A track of an object instance.
+
Attributes:
segment:
Video segment of a track.
@@ -4338,10 +4789,11 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """A generic detected attribute represented by name in string format.
+
Attributes:
name:
- The name of the attribute, i.e. glasses, dark_glasses,
- mouth_open etc. A full list of supported type names will be
+ The name of the attribute, for example, glasses, dark_glasses,
+ mouth_open. A full list of supported type names will be
provided in the document.
confidence:
Detected attribute confidence. Range [0, 1].
@@ -4363,9 +4815,11 @@
"__doc__": """A generic detected landmark represented by name in string format and a
2D location.
+
Attributes:
name:
- The name of this landmark, i.e. left_hand, right_shoulder.
+ The name of this landmark, for example, left_hand,
+ right_shoulder.
point:
The 2D point of the detected landmark using the normalized
image coordindate system. The normalized coordinates have the
@@ -4386,18 +4840,19 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Annotation results for a single video.
+
Attributes:
input_uri:
- Video file location in `Google Cloud Storage
+ Video file location in `Cloud Storage
`__.
segment:
Video segment on which the annotation is run.
segment_label_annotations:
- Topical label annotations on video level or user specified
+ Topical label annotations on video level or user-specified
segment level. There is exactly one element for each unique
label.
segment_presence_label_annotations:
- Presence label annotations on video level or user specified
+ Presence label annotations on video level or user-specified
segment level. There is exactly one element for each unique
label. Compared to the existing topical
``segment_label_annotations``, this field presents more fine-
@@ -4420,8 +4875,9 @@
Label annotations on frame level. There is exactly one element
for each unique label.
face_annotations:
- Face annotations. There is exactly one element for each unique
- face.
+ Deprecated. Please use ``face_detection_annotations`` instead.
+ face_detection_annotations:
+ Face detection annotations.
shot_annotations:
Shot annotations. Each shot is represented as a video segment.
explicit_annotation:
@@ -4437,6 +4893,8 @@
logo_recognition_annotations:
Annotations for list of logos detected, tracked and recognized
in video.
+ person_detection_annotations:
+ Person detection annotations.
error:
If set, indicates an error. Note that for a single
``AnnotateVideoRequest`` some videos may succeed and some may
@@ -4457,6 +4915,7 @@
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
+
Attributes:
annotation_results:
Annotation results for all videos specified in
@@ -4475,9 +4934,10 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Annotation progress for a single video.
+
Attributes:
input_uri:
- Video file location in `Google Cloud Storage
+ Video file location in `Cloud Storage
`__.
progress_percent:
Approximate percentage processed thus far. Guaranteed to be
@@ -4488,10 +4948,10 @@
Time of the most recent update.
feature:
Specifies which feature is being tracked if the request
- contains more than one features.
+ contains more than one feature.
segment:
Specifies which segment is being tracked if the request
- contains more than one segments.
+ contains more than one segment.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.VideoAnnotationProgress)
},
@@ -4508,6 +4968,7 @@
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
+
Attributes:
annotation_progress:
Progress metadata for all videos specified in
@@ -4526,6 +4987,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for SPEECH_TRANSCRIPTION.
+
Attributes:
language_code:
Required. *Required* The language of the supplied audio as a
@@ -4569,9 +5031,9 @@
result using a speaker_tag provided in the WordInfo. Note:
When this is true, we send all the words from the beginning of
the audio for the top alternative in every consecutive
- responses. This is done in order to improve our speaker tags
- as our models learn to identify the speakers in the
- conversation over time.
+ response. This is done in order to improve our speaker tags as
+ our models learn to identify the speakers in the conversation
+ over time.
diarization_speaker_count:
Optional. If set, specifies the estimated number of speakers
in the conversation. If not set, defaults to ‘2’. Ignored
@@ -4596,6 +5058,7 @@
"__doc__": """Provides “hints” to the speech recognizer to favor specific words and
phrases in the results.
+
Attributes:
phrases:
Optional. A list of strings containing words and phrases
@@ -4620,6 +5083,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """A speech recognition result corresponding to a portion of the audio.
+
Attributes:
alternatives:
May contain one or more recognition hypotheses (up to the
@@ -4646,6 +5110,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Alternative hypotheses (a.k.a. n-best list).
+
Attributes:
transcript:
Transcript text representing the words that the user spoke.
@@ -4660,8 +5125,8 @@
words:
Output only. A list of word-specific information for each
recognized word. Note: When ``enable_speaker_diarization`` is
- true, you will see all the words from the beginning of the
- audio.
+ set to true, you will see all the words from the beginning of
+ the audio.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.SpeechRecognitionAlternative)
},
@@ -4678,6 +5143,7 @@
only included in the response when certain request parameters are set,
such as ``enable_word_time_offsets``.
+
Attributes:
start_time:
Time offset relative to the beginning of the audio, and
@@ -4721,6 +5187,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """X coordinate.
+
Attributes:
y:
Y coordinate.
@@ -4745,6 +5212,7 @@
3). Note that values can be less than 0, or greater than 1 due to
trignometric calculations for location of the box.
+
Attributes:
vertices:
Normalized vertices of the bounding polygon.
@@ -4762,6 +5230,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for text detection.
+
Attributes:
segment:
Video segment where a text snippet was detected.
@@ -4787,6 +5256,7 @@
Contains information regarding timestamp and bounding box locations
for the frames containing detected OCR text snippets.
+
Attributes:
rotated_bounding_box:
Bounding polygon of the detected text for this frame.
@@ -4808,11 +5278,14 @@
contain the corresponding text, confidence value, and frame level
information for each detection.
+
Attributes:
text:
The detected text.
segments:
All video segments where OCR detected text appears.
+ version:
+ Feature version.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextAnnotation)
},
@@ -4828,6 +5301,7 @@
"__doc__": """Video frame level annotations for object detection and tracking. This
field stores per frame location, time offset, and confidence.
+
Attributes:
normalized_bounding_box:
The normalized bounding box location of this object track for
@@ -4848,6 +5322,7 @@
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Annotations corresponding to one tracked object.
+
Attributes:
track_info:
Different representation of tracking info in non-streaming
@@ -4873,6 +5348,8 @@
multiple ObjectTrackingFrame messages in frames. Streaming
mode: it can only be one ObjectTrackingFrame message in
frames.
+ version:
+ Feature version.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ObjectTrackingAnnotation)
},
@@ -4888,6 +5365,7 @@
"__doc__": """Annotation corresponding to one detected, tracked and recognized logo
class.
+
Attributes:
entity:
Entity category information to specify the logo class that all
@@ -4912,10 +5390,13 @@
_ANNOTATEVIDEOREQUEST.fields_by_name["features"]._options = None
_ANNOTATEVIDEOREQUEST.fields_by_name["output_uri"]._options = None
_ANNOTATEVIDEOREQUEST.fields_by_name["location_id"]._options = None
+_FACEFRAME._options = None
+_FACEANNOTATION._options = None
_TIMESTAMPEDOBJECT.fields_by_name["attributes"]._options = None
_TIMESTAMPEDOBJECT.fields_by_name["landmarks"]._options = None
_TRACK.fields_by_name["attributes"]._options = None
_TRACK.fields_by_name["confidence"]._options = None
+_VIDEOANNOTATIONRESULTS.fields_by_name["face_annotations"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["language_code"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["max_alternatives"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["filter_profanity"]._options = None
@@ -4941,8 +5422,8 @@
index=0,
serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform",
create_key=_descriptor._internal_create_key,
- serialized_start=8239,
- serialized_end=8559,
+ serialized_start=8921,
+ serialized_end=9241,
methods=[
_descriptor.MethodDescriptor(
name="AnnotateVideo",
diff --git a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2_grpc.py b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2_grpc.py
index 217caa64..928dbd7e 100644
--- a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2_grpc.py
+++ b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2_grpc.py
@@ -1,4 +1,5 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.cloud.videointelligence_v1.proto import (
@@ -10,15 +11,15 @@
class VideoIntelligenceServiceStub(object):
- """Service that implements Google Cloud Video Intelligence API.
- """
+ """Service that implements the Video Intelligence API.
+ """
def __init__(self, channel):
"""Constructor.
- Args:
- channel: A grpc.Channel.
- """
+ Args:
+ channel: A grpc.Channel.
+ """
self.AnnotateVideo = channel.unary_unary(
"/google.cloud.videointelligence.v1.VideoIntelligenceService/AnnotateVideo",
request_serializer=google_dot_cloud_dot_videointelligence__v1_dot_proto_dot_video__intelligence__pb2.AnnotateVideoRequest.SerializeToString,
@@ -27,15 +28,15 @@ def __init__(self, channel):
class VideoIntelligenceServiceServicer(object):
- """Service that implements Google Cloud Video Intelligence API.
- """
+ """Service that implements the Video Intelligence API.
+ """
def AnnotateVideo(self, request, context):
"""Performs asynchronous video annotation. Progress and results can be
- retrieved through the `google.longrunning.Operations` interface.
- `Operation.metadata` contains `AnnotateVideoProgress` (progress).
- `Operation.response` contains `AnnotateVideoResponse` (results).
- """
+ retrieved through the `google.longrunning.Operations` interface.
+ `Operation.metadata` contains `AnnotateVideoProgress` (progress).
+ `Operation.response` contains `AnnotateVideoResponse` (results).
+ """
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
@@ -54,3 +55,36 @@ def add_VideoIntelligenceServiceServicer_to_server(servicer, server):
rpc_method_handlers,
)
server.add_generic_rpc_handlers((generic_handler,))
+
+
+# This class is part of an EXPERIMENTAL API.
+class VideoIntelligenceService(object):
+ """Service that implements the Video Intelligence API.
+ """
+
+ @staticmethod
+ def AnnotateVideo(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.cloud.videointelligence.v1.VideoIntelligenceService/AnnotateVideo",
+ google_dot_cloud_dot_videointelligence__v1_dot_proto_dot_video__intelligence__pb2.AnnotateVideoRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ )
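
This module-level VideoIntelligenceService class is grpc's generated
"experimental API" shim: a one-shot unary call that builds its own channel per
invocation. A hedged usage sketch (the credentials below are an assumption;
real calls to this service also need OAuth call credentials):

    import grpc
    from google.cloud.videointelligence_v1.proto import (
        video_intelligence_pb2,
        video_intelligence_pb2_grpc,
    )

    request = video_intelligence_pb2.AnnotateVideoRequest(
        input_uri="gs://bucket-id/object-id",
        features=[video_intelligence_pb2.PERSON_DETECTION],
    )
    operation = video_intelligence_pb2_grpc.VideoIntelligenceService.AnnotateVideo(
        request,
        target="videointelligence.googleapis.com:443",
        channel_credentials=grpc.ssl_channel_credentials(),
        timeout=600,
    )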
diff --git a/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client_config.py b/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client_config.py
index 9bae7bc2..01a2f2ac 100644
--- a/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client_config.py
+++ b/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client_config.py
@@ -2,25 +2,34 @@
"interfaces": {
"google.cloud.videointelligence.v1beta2.VideoIntelligenceService": {
"retry_codes": {
- "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
- "non_idempotent": [],
+ "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
+ "no_retry_codes": [],
},
"retry_params": {
- "default": {
+ "retry_policy_1_params": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
- "initial_rpc_timeout_millis": 120000,
+ "initial_rpc_timeout_millis": 600000,
"rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 120000,
+ "max_rpc_timeout_millis": 600000,
"total_timeout_millis": 600000,
- }
+ },
+ "no_retry_params": {
+ "initial_retry_delay_millis": 0,
+ "retry_delay_multiplier": 0.0,
+ "max_retry_delay_millis": 0,
+ "initial_rpc_timeout_millis": 0,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 0,
+ "total_timeout_millis": 0,
+ },
},
"methods": {
"AnnotateVideo": {
- "timeout_millis": 60000,
- "retry_codes_name": "idempotent",
- "retry_params_name": "default",
+ "timeout_millis": 600000,
+ "retry_codes_name": "retry_policy_1_codes",
+ "retry_params_name": "retry_policy_1_params",
}
},
}
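
Behaviorally, the rename also raises the per-RPC timeouts from 120s to 600s
and the method timeout from 60s to 600s; the backoff shape itself is
unchanged. The retry-delay schedule these parameters imply (plain arithmetic,
before any jitter the client library may apply):

    delay, schedule = 1.0, []          # initial_retry_delay_millis = 1000
    while sum(schedule) < 600:         # stay inside total_timeout_millis
        schedule.append(delay)
        delay = min(delay * 2.5, 120)  # retry_delay_multiplier, max cap
    # schedule: 1.0, 2.5, 6.25, 15.625, 39.0625, 97.65625, 120, 120, ...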
diff --git a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py
index 44716628..96840dd2 100644
--- a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py
+++ b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto
-
+"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
@@ -1907,6 +1907,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Video annotation request.
+
Attributes:
input_uri:
Input video location. Currently, only `Google Cloud Storage
@@ -1957,6 +1958,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Video context and/or feature-specific parameters.
+
Attributes:
segments:
Video segments to annotate. The segments may overlap and are
@@ -1984,6 +1986,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Config for LABEL_DETECTION.
+
Attributes:
label_detection_mode:
What labels should be detected with LABEL_DETECTION, in
@@ -2011,6 +2014,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Config for SHOT_CHANGE_DETECTION.
+
Attributes:
model:
Model to use for shot change detection. Supported values:
@@ -2029,6 +2033,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Config for EXPLICIT_CONTENT_DETECTION.
+
Attributes:
model:
Model to use for explicit content detection. Supported values:
@@ -2047,6 +2052,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Config for FACE_DETECTION.
+
Attributes:
model:
Model to use for face detection. Supported values:
@@ -2068,6 +2074,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Video segment.
+
Attributes:
start_time_offset:
Time-offset, relative to the beginning of the video,
@@ -2089,6 +2096,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for label detection.
+
Attributes:
segment:
Video segment where a label was detected.
@@ -2108,6 +2116,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for label detection.
+
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
@@ -2128,6 +2137,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Detected entity from video analysis.
+
Attributes:
entity_id:
Opaque entity ID. Some IDs may be available in `Google
@@ -2151,6 +2161,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Label annotation.
+
Attributes:
entity:
Detected entity.
@@ -2177,6 +2188,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for explicit content.
+
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
@@ -2199,6 +2211,7 @@
If no explicit content has been detected in a frame, no annotations
are present for that frame.
+
Attributes:
frames:
All video frames where explicit content was detected.
@@ -2217,6 +2230,7 @@
"__doc__": """Normalized bounding box. The normalized vertex coordinates are
relative to the original image. Range: [0, 1].
+
Attributes:
left:
Left X coordinate.
@@ -2240,6 +2254,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for face detection.
+
Attributes:
segment:
Video segment where a face was detected.
@@ -2257,6 +2272,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for face detection.
+
Attributes:
normalized_bounding_boxes:
Normalized Bounding boxes in a frame. There can be more than
@@ -2279,6 +2295,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Face annotation.
+
Attributes:
thumbnail:
Thumbnail of a representative face view (in JPEG format).
@@ -2300,6 +2317,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Annotation results for a single video.
+
Attributes:
input_uri:
Video file location in `Google Cloud Storage
@@ -2340,6 +2358,7 @@
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
+
Attributes:
annotation_results:
Annotation results for all videos specified in
@@ -2358,6 +2377,7 @@
"__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2",
"__doc__": """Annotation progress for a single video.
+
Attributes:
input_uri:
Video file location in `Google Cloud Storage
@@ -2385,6 +2405,7 @@
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
+
Attributes:
annotation_progress:
Progress metadata for all videos specified in
diff --git a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2_grpc.py b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2_grpc.py
index 0ae18df7..6e49a339 100644
--- a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2_grpc.py
+++ b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2_grpc.py
@@ -1,4 +1,5 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.cloud.videointelligence_v1beta2.proto import (
@@ -11,14 +12,14 @@
class VideoIntelligenceServiceStub(object):
"""Service that implements Google Cloud Video Intelligence API.
- """
+ """
def __init__(self, channel):
"""Constructor.
- Args:
- channel: A grpc.Channel.
- """
+ Args:
+ channel: A grpc.Channel.
+ """
self.AnnotateVideo = channel.unary_unary(
"/google.cloud.videointelligence.v1beta2.VideoIntelligenceService/AnnotateVideo",
request_serializer=google_dot_cloud_dot_videointelligence__v1beta2_dot_proto_dot_video__intelligence__pb2.AnnotateVideoRequest.SerializeToString,
@@ -28,14 +29,14 @@ def __init__(self, channel):
class VideoIntelligenceServiceServicer(object):
"""Service that implements Google Cloud Video Intelligence API.
- """
+ """
def AnnotateVideo(self, request, context):
"""Performs asynchronous video annotation. Progress and results can be
- retrieved through the `google.longrunning.Operations` interface.
- `Operation.metadata` contains `AnnotateVideoProgress` (progress).
- `Operation.response` contains `AnnotateVideoResponse` (results).
- """
+ retrieved through the `google.longrunning.Operations` interface.
+ `Operation.metadata` contains `AnnotateVideoProgress` (progress).
+ `Operation.response` contains `AnnotateVideoResponse` (results).
+ """
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
@@ -54,3 +55,36 @@ def add_VideoIntelligenceServiceServicer_to_server(servicer, server):
rpc_method_handlers,
)
server.add_generic_rpc_handlers((generic_handler,))
+
+
+# This class is part of an EXPERIMENTAL API.
+class VideoIntelligenceService(object):
+ """Service that implements Google Cloud Video Intelligence API.
+ """
+
+ @staticmethod
+ def AnnotateVideo(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.cloud.videointelligence.v1beta2.VideoIntelligenceService/AnnotateVideo",
+ google_dot_cloud_dot_videointelligence__v1beta2_dot_proto_dot_video__intelligence__pb2.AnnotateVideoRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ )
diff --git a/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client_config.py b/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client_config.py
index ba29086d..03bdba6a 100644
--- a/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client_config.py
+++ b/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client_config.py
@@ -2,25 +2,34 @@
"interfaces": {
"google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService": {
"retry_codes": {
- "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
- "non_idempotent": [],
+ "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
+ "no_retry_codes": [],
},
"retry_params": {
- "default": {
+ "retry_policy_1_params": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
- "initial_rpc_timeout_millis": 120000,
+ "initial_rpc_timeout_millis": 600000,
"rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 120000,
+ "max_rpc_timeout_millis": 600000,
"total_timeout_millis": 600000,
- }
+ },
+ "no_retry_params": {
+ "initial_retry_delay_millis": 0,
+ "retry_delay_multiplier": 0.0,
+ "max_retry_delay_millis": 0,
+ "initial_rpc_timeout_millis": 0,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 0,
+ "total_timeout_millis": 0,
+ },
},
"methods": {
"AnnotateVideo": {
"timeout_millis": 600000,
- "retry_codes_name": "idempotent",
- "retry_params_name": "default",
+ "retry_codes_name": "retry_policy_1_codes",
+ "retry_params_name": "retry_policy_1_params",
}
},
}
diff --git a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py
index bf542aec..c5de794c 100644
--- a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py
+++ b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto
-
+"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
@@ -1953,6 +1953,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Video annotation request.
+
Attributes:
input_uri:
Input video location. Currently, only `Google Cloud Storage
@@ -2003,6 +2004,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Video context and/or feature-specific parameters.
+
Attributes:
segments:
Video segments to annotate. The segments may overlap and are
@@ -2030,6 +2032,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Config for LABEL_DETECTION.
+
Attributes:
label_detection_mode:
What labels should be detected with LABEL_DETECTION, in
@@ -2057,6 +2060,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Config for SHOT_CHANGE_DETECTION.
+
Attributes:
model:
Model to use for shot change detection. Supported values:
@@ -2075,6 +2079,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Config for EXPLICIT_CONTENT_DETECTION.
+
Attributes:
model:
Model to use for explicit content detection. Supported values:
@@ -2093,6 +2098,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Video segment.
+
Attributes:
start_time_offset:
Time-offset, relative to the beginning of the video,
@@ -2114,6 +2120,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for label detection.
+
Attributes:
segment:
Video segment where a label was detected.
@@ -2133,6 +2140,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for label detection.
+
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
@@ -2153,6 +2161,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Detected entity from video analysis.
+
Attributes:
entity_id:
Opaque entity ID. Some IDs may be available in `Google
@@ -2176,6 +2185,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Label annotation.
+
Attributes:
entity:
Detected entity.
@@ -2202,6 +2212,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for explicit content.
+
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
@@ -2224,6 +2235,7 @@
If no explicit content has been detected in a frame, no annotations
are present for that frame.
+
Attributes:
frames:
All video frames where explicit content was detected.
@@ -2241,6 +2253,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Annotation results for a single video.
+
Attributes:
input_uri:
Output only. Video file location in `Google Cloud Storage
@@ -2280,6 +2293,7 @@
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
+
Attributes:
annotation_results:
Annotation results for all videos specified in
@@ -2298,6 +2312,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Annotation progress for a single video.
+
Attributes:
input_uri:
Output only. Video file location in `Google Cloud Storage
@@ -2325,6 +2340,7 @@
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
+
Attributes:
annotation_progress:
Progress metadata for all videos specified in
@@ -2343,6 +2359,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Config for SPEECH_TRANSCRIPTION.
+
Attributes:
language_code:
Required. *Required* The language of the supplied audio as a
@@ -2395,6 +2412,7 @@
"__doc__": """Provides “hints” to the speech recognizer to favor specific words and
phrases in the results.
+
Attributes:
phrases:
Optional. A list of strings containing words and phrases
@@ -2419,6 +2437,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """A speech recognition result corresponding to a portion of the audio.
+
Attributes:
alternatives:
May contain one or more recognition hypotheses (up to the
@@ -2440,6 +2459,7 @@
"__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2",
"__doc__": """Alternative hypotheses (a.k.a. n-best list).
+
Attributes:
transcript:
Output only. Transcript text representing the words that the
@@ -2471,6 +2491,7 @@
only included in the response when certain request parameters are set,
such as ``enable_word_time_offsets``.
+
Attributes:
start_time:
Output only. Time offset relative to the beginning of the
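
The only change to the message docstrings above is the blank line inserted between each summary and its Attributes: block, which lets docstring processors (for example Sphinx's napoleon extension) parse the attributes as a distinct section. A quick standalone check, assuming the package is installed:

    from google.cloud.videointelligence_v1p1beta1.proto import video_intelligence_pb2

    # The __doc__ assembled above is attached to the generated message class.
    print(video_intelligence_pb2.AnnotateVideoRequest.__doc__)
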
diff --git a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2_grpc.py b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2_grpc.py
index ee4e104e..401c8a06 100644
--- a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2_grpc.py
+++ b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2_grpc.py
@@ -1,4 +1,5 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.cloud.videointelligence_v1p1beta1.proto import (
@@ -11,14 +12,14 @@
class VideoIntelligenceServiceStub(object):
"""Service that implements Google Cloud Video Intelligence API.
- """
+ """
def __init__(self, channel):
"""Constructor.
- Args:
- channel: A grpc.Channel.
- """
+ Args:
+ channel: A grpc.Channel.
+ """
self.AnnotateVideo = channel.unary_unary(
"/google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService/AnnotateVideo",
request_serializer=google_dot_cloud_dot_videointelligence__v1p1beta1_dot_proto_dot_video__intelligence__pb2.AnnotateVideoRequest.SerializeToString,
@@ -28,14 +29,14 @@ def __init__(self, channel):
class VideoIntelligenceServiceServicer(object):
"""Service that implements Google Cloud Video Intelligence API.
- """
+ """
def AnnotateVideo(self, request, context):
"""Performs asynchronous video annotation. Progress and results can be
- retrieved through the `google.longrunning.Operations` interface.
- `Operation.metadata` contains `AnnotateVideoProgress` (progress).
- `Operation.response` contains `AnnotateVideoResponse` (results).
- """
+ retrieved through the `google.longrunning.Operations` interface.
+ `Operation.metadata` contains `AnnotateVideoProgress` (progress).
+ `Operation.response` contains `AnnotateVideoResponse` (results).
+ """
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
@@ -54,3 +55,36 @@ def add_VideoIntelligenceServiceServicer_to_server(servicer, server):
rpc_method_handlers,
)
server.add_generic_rpc_handlers((generic_handler,))
+
+
+# This class is part of an EXPERIMENTAL API.
+class VideoIntelligenceService(object):
+ """Service that implements Google Cloud Video Intelligence API.
+ """
+
+ @staticmethod
+ def AnnotateVideo(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService/AnnotateVideo",
+ google_dot_cloud_dot_videointelligence__v1p1beta1_dot_proto_dot_video__intelligence__pb2.AnnotateVideoRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ )
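
The experimental VideoIntelligenceService helper added above wraps a one-off unary call without constructing a stub or channel by hand. A hypothetical usage sketch follows; the endpoint, credentials, and feature value are assumptions, authentication metadata is omitted, and grpc.experimental APIs may emit ExperimentalApiWarning:

    import grpc

    from google.cloud.videointelligence_v1p1beta1.proto import (
        video_intelligence_pb2,
        video_intelligence_pb2_grpc,
    )

    request = video_intelligence_pb2.AnnotateVideoRequest(
        input_uri="gs://bucket-id/object-id",
        features=[video_intelligence_pb2.LABEL_DETECTION],
    )
    # Blocking call; returns a google.longrunning.Operation message.
    operation = video_intelligence_pb2_grpc.VideoIntelligenceService.AnnotateVideo(
        request,
        target="videointelligence.googleapis.com:443",
        channel_credentials=grpc.ssl_channel_credentials(),
        timeout=600,
    )
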
diff --git a/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client_config.py b/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client_config.py
index 4c0b9f38..104643b4 100644
--- a/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client_config.py
+++ b/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client_config.py
@@ -2,25 +2,34 @@
"interfaces": {
"google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService": {
"retry_codes": {
- "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
- "non_idempotent": [],
+ "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
+ "no_retry_codes": [],
},
"retry_params": {
- "default": {
+ "retry_policy_1_params": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
- "initial_rpc_timeout_millis": 120000,
+ "initial_rpc_timeout_millis": 600000,
"rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 120000,
+ "max_rpc_timeout_millis": 600000,
"total_timeout_millis": 600000,
- }
+ },
+ "no_retry_params": {
+ "initial_retry_delay_millis": 0,
+ "retry_delay_multiplier": 0.0,
+ "max_retry_delay_millis": 0,
+ "initial_rpc_timeout_millis": 0,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 0,
+ "total_timeout_millis": 0,
+ },
},
"methods": {
"AnnotateVideo": {
"timeout_millis": 600000,
- "retry_codes_name": "idempotent",
- "retry_params_name": "default",
+ "retry_codes_name": "retry_policy_1_codes",
+ "retry_params_name": "retry_policy_1_params",
}
},
}
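
This is the same retry-policy rename as in v1p1beta1. For reference, a minimal sketch of roughly how the generated client consumes such a config dict (module and key names taken from this file; the parsing helper is from google.api_core):

    from google.api_core.gapic_v1 import config as gapic_config
    from google.cloud.videointelligence_v1p2beta1.gapic import (
        video_intelligence_service_client_config,
    )

    interface = video_intelligence_service_client_config.config["interfaces"][
        "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService"
    ]
    # Maps method names to MethodConfig(retry=Retry(...), timeout=...).
    method_configs = gapic_config.parse_method_configs(interface)
    print(method_configs["AnnotateVideo"])
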
diff --git a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py
index 8c9b179f..5d55b50f 100644
--- a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py
+++ b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto
-
+"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
@@ -2209,6 +2209,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Video annotation request.
+
Attributes:
input_uri:
Input video location. Currently, only `Google Cloud Storage
@@ -2259,6 +2260,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Video context and/or feature-specific parameters.
+
Attributes:
segments:
Video segments to annotate. The segments may overlap and are
@@ -2286,6 +2288,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Config for LABEL_DETECTION.
+
Attributes:
label_detection_mode:
What labels should be detected with LABEL_DETECTION, in
@@ -2313,6 +2316,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Config for SHOT_CHANGE_DETECTION.
+
Attributes:
model:
Model to use for shot change detection. Supported values:
@@ -2331,6 +2335,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Config for EXPLICIT_CONTENT_DETECTION.
+
Attributes:
model:
Model to use for explicit content detection. Supported values:
@@ -2349,6 +2354,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Config for TEXT_DETECTION.
+
Attributes:
language_hints:
Language hint can be specified if the language to be detected
@@ -2370,6 +2376,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Video segment.
+
Attributes:
start_time_offset:
Time-offset, relative to the beginning of the video,
@@ -2391,6 +2398,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for label detection.
+
Attributes:
segment:
Video segment where a label was detected.
@@ -2410,6 +2418,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for label detection.
+
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
@@ -2430,6 +2439,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Detected entity from video analysis.
+
Attributes:
entity_id:
Opaque entity ID. Some IDs may be available in `Google
@@ -2453,6 +2463,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Label annotation.
+
Attributes:
entity:
Detected entity.
@@ -2479,6 +2490,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for explicit content.
+
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
@@ -2501,6 +2513,7 @@
If no explicit content has been detected in a frame, no annotations
are present for that frame.
+
Attributes:
frames:
All video frames where explicit content was detected.
@@ -2519,6 +2532,7 @@
"__doc__": """Normalized bounding box. The normalized vertex coordinates are
relative to the original image. Range: [0, 1].
+
Attributes:
left:
Left X coordinate.
@@ -2542,6 +2556,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Annotation results for a single video.
+
Attributes:
input_uri:
Video file location in `Google Cloud Storage
@@ -2585,6 +2600,7 @@
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
+
Attributes:
annotation_results:
Annotation results for all videos specified in
@@ -2603,6 +2619,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Annotation progress for a single video.
+
Attributes:
input_uri:
Video file location in `Google Cloud Storage
@@ -2630,6 +2647,7 @@
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
+
Attributes:
annotation_progress:
Progress metadata for all videos specified in
@@ -2648,6 +2666,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """X coordinate.
+
Attributes:
y:
Y coordinate.
@@ -2672,6 +2691,7 @@
3). Note that values can be less than 0, or greater than 1 due to
trignometric calculations for location of the box.
+
Attributes:
vertices:
Normalized vertices of the bounding polygon.
@@ -2689,6 +2709,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for text detection.
+
Attributes:
segment:
Video segment where a text snippet was detected.
@@ -2714,6 +2735,7 @@
Contains information regarding timestamp and bounding box locations
for the frames containing detected OCR text snippets.
+
Attributes:
rotated_bounding_box:
Bounding polygon of the detected text for this frame.
@@ -2735,6 +2757,7 @@
contain the corresponding text, confidence value, and frame level
information for each detection.
+
Attributes:
text:
The detected text.
@@ -2755,6 +2778,7 @@
"__doc__": """Video frame level annotations for object detection and tracking. This
field stores per frame location, time offset, and confidence.
+
Attributes:
normalized_bounding_box:
The normalized bounding box location of this object track for
@@ -2775,6 +2799,7 @@
"__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2",
"__doc__": """Annotations corresponding to one tracked object.
+
Attributes:
entity:
Entity to specify the object category that this track is
diff --git a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2_grpc.py b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2_grpc.py
index 79221da0..519e851b 100644
--- a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2_grpc.py
+++ b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2_grpc.py
@@ -1,4 +1,5 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.cloud.videointelligence_v1p2beta1.proto import (
@@ -11,14 +12,14 @@
class VideoIntelligenceServiceStub(object):
"""Service that implements Google Cloud Video Intelligence API.
- """
+ """
def __init__(self, channel):
"""Constructor.
- Args:
- channel: A grpc.Channel.
- """
+ Args:
+ channel: A grpc.Channel.
+ """
self.AnnotateVideo = channel.unary_unary(
"/google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService/AnnotateVideo",
request_serializer=google_dot_cloud_dot_videointelligence__v1p2beta1_dot_proto_dot_video__intelligence__pb2.AnnotateVideoRequest.SerializeToString,
@@ -28,14 +29,14 @@ def __init__(self, channel):
class VideoIntelligenceServiceServicer(object):
"""Service that implements Google Cloud Video Intelligence API.
- """
+ """
def AnnotateVideo(self, request, context):
"""Performs asynchronous video annotation. Progress and results can be
- retrieved through the `google.longrunning.Operations` interface.
- `Operation.metadata` contains `AnnotateVideoProgress` (progress).
- `Operation.response` contains `AnnotateVideoResponse` (results).
- """
+ retrieved through the `google.longrunning.Operations` interface.
+ `Operation.metadata` contains `AnnotateVideoProgress` (progress).
+ `Operation.response` contains `AnnotateVideoResponse` (results).
+ """
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
@@ -54,3 +55,36 @@ def add_VideoIntelligenceServiceServicer_to_server(servicer, server):
rpc_method_handlers,
)
server.add_generic_rpc_handlers((generic_handler,))
+
+
+# This class is part of an EXPERIMENTAL API.
+class VideoIntelligenceService(object):
+ """Service that implements Google Cloud Video Intelligence API.
+ """
+
+ @staticmethod
+ def AnnotateVideo(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService/AnnotateVideo",
+ google_dot_cloud_dot_videointelligence__v1p2beta1_dot_proto_dot_video__intelligence__pb2.AnnotateVideoRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ )
diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client_config.py b/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client_config.py
index 2cf8bb06..06034424 100644
--- a/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client_config.py
+++ b/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client_config.py
@@ -2,11 +2,21 @@
"interfaces": {
"google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService": {
"retry_codes": {
- "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
- "non_idempotent": [],
+ "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
+ "no_retry_codes": [],
+ "retry_policy_2_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
},
"retry_params": {
- "default": {
+ "retry_policy_1_params": {
+ "initial_retry_delay_millis": 1000,
+ "retry_delay_multiplier": 2.5,
+ "max_retry_delay_millis": 120000,
+ "initial_rpc_timeout_millis": 600000,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 600000,
+ "total_timeout_millis": 600000,
+ },
+ "retry_policy_2_params": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
@@ -14,13 +24,22 @@
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 10800000,
"total_timeout_millis": 10800000,
- }
+ },
+ "no_retry_params": {
+ "initial_retry_delay_millis": 0,
+ "retry_delay_multiplier": 0.0,
+ "max_retry_delay_millis": 0,
+ "initial_rpc_timeout_millis": 0,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 0,
+ "total_timeout_millis": 0,
+ },
},
"methods": {
"StreamingAnnotateVideo": {
- "timeout_millis": 60000,
- "retry_codes_name": "idempotent",
- "retry_params_name": "default",
+ "timeout_millis": 10800000,
+ "retry_codes_name": "retry_policy_2_codes",
+ "retry_params_name": "retry_policy_2_params",
}
},
}
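
StreamingAnnotateVideo keeps its gentler backoff (now named retry_policy_2) but raises the method timeout from 60 s to the full 3-hour budget. A standalone sketch of the resulting delay schedule, ignoring time spent inside the RPCs themselves:

    def backoff_delays(initial_ms=100, multiplier=1.3, max_ms=60000, total_ms=10800000):
        """Yield retry delays in milliseconds until the total budget is spent."""
        elapsed, delay = 0.0, float(initial_ms)
        while elapsed + delay <= total_ms:
            yield delay
            elapsed += delay
            delay = min(delay * multiplier, max_ms)

    delays = list(backoff_delays())
    print(delays[:4])  # [100.0, 130.0, 169.0, 219.7]
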
diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client_config.py b/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client_config.py
index c15923a1..beb8770d 100644
--- a/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client_config.py
+++ b/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client_config.py
@@ -2,25 +2,34 @@
"interfaces": {
"google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService": {
"retry_codes": {
- "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
- "non_idempotent": [],
+ "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
+ "no_retry_codes": [],
},
"retry_params": {
- "default": {
+ "retry_policy_1_params": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
- "initial_rpc_timeout_millis": 120000,
+ "initial_rpc_timeout_millis": 600000,
"rpc_timeout_multiplier": 1.0,
- "max_rpc_timeout_millis": 120000,
+ "max_rpc_timeout_millis": 600000,
"total_timeout_millis": 600000,
- }
+ },
+ "no_retry_params": {
+ "initial_retry_delay_millis": 0,
+ "retry_delay_multiplier": 0.0,
+ "max_retry_delay_millis": 0,
+ "initial_rpc_timeout_millis": 0,
+ "rpc_timeout_multiplier": 1.0,
+ "max_rpc_timeout_millis": 0,
+ "total_timeout_millis": 0,
+ },
},
"methods": {
"AnnotateVideo": {
"timeout_millis": 600000,
- "retry_codes_name": "idempotent",
- "retry_params_name": "default",
+ "retry_codes_name": "retry_policy_1_codes",
+ "retry_params_name": "retry_policy_1_params",
}
},
}
diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py
index 1d0bf911..9d118459 100644
--- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py
+++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto
-
+"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
@@ -5140,6 +5140,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Video annotation request.
+
Attributes:
input_uri:
Input video location. Currently, only `Cloud Storage
@@ -5190,6 +5191,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Video context and/or feature-specific parameters.
+
Attributes:
segments:
Video segments to annotate. The segments may overlap and are
@@ -5225,6 +5227,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for LABEL_DETECTION.
+
Attributes:
label_detection_mode:
What labels should be detected with LABEL_DETECTION, in
@@ -5267,6 +5270,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for SHOT_CHANGE_DETECTION.
+
Attributes:
model:
Model to use for shot change detection. Supported values:
@@ -5285,6 +5289,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for OBJECT_TRACKING.
+
Attributes:
model:
Model to use for object tracking. Supported values:
@@ -5303,6 +5308,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for EXPLICIT_CONTENT_DETECTION.
+
Attributes:
model:
Model to use for explicit content detection. Supported values:
@@ -5321,6 +5327,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for FACE_DETECTION.
+
Attributes:
model:
Model to use for face detection. Supported values:
@@ -5346,6 +5353,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for PERSON_DETECTION.
+
Attributes:
include_bounding_boxes:
Whether bounding boxes are included in the person detection
@@ -5372,6 +5380,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for TEXT_DETECTION.
+
Attributes:
language_hints:
Language hint can be specified if the language to be detected
@@ -5396,6 +5405,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Video segment.
+
Attributes:
start_time_offset:
Time-offset, relative to the beginning of the video,
@@ -5417,6 +5427,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for label detection.
+
Attributes:
segment:
Video segment where a label was detected.
@@ -5436,6 +5447,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for label detection.
+
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
@@ -5456,6 +5468,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Detected entity from video analysis.
+
Attributes:
entity_id:
Opaque entity ID. Some IDs may be available in `Google
@@ -5479,6 +5492,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Label annotation.
+
Attributes:
entity:
Detected entity.
@@ -5505,6 +5519,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for explicit content.
+
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
@@ -5527,6 +5542,7 @@
If no explicit content has been detected in a frame, no annotations
are present for that frame.
+
Attributes:
frames:
All video frames where explicit content was detected.
@@ -5545,6 +5561,7 @@
"__doc__": """Normalized bounding box. The normalized vertex coordinates are
relative to the original image. Range: [0, 1].
+
Attributes:
left:
Left X coordinate.
@@ -5569,6 +5586,7 @@
"__doc__": """For tracking related features. An object at time_offset with
attributes, and located with normalized_bounding_box.
+
Attributes:
normalized_bounding_box:
Normalized Bounding box in a frame, where the object is
@@ -5594,6 +5612,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """A track of an object instance.
+
Attributes:
segment:
Video segment of a track.
@@ -5618,6 +5637,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """A generic detected attribute represented by name in string format.
+
Attributes:
name:
The name of the attribute, for example, glasses, dark_glasses,
@@ -5642,6 +5662,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Celebrity definition.
+
Attributes:
name:
The resource name of the celebrity. Have the format ``video-
@@ -5671,6 +5692,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """The recognized celebrity with confidence score.
+
Attributes:
celebrity:
The recognized celebrity.
@@ -5686,6 +5708,7 @@
field could be empty if the face track does not have any matched
celebrities.
+
Attributes:
celebrities:
Top N match of the celebrities for the face in this track.
@@ -5706,6 +5729,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Celebrity recognition annotation per video.
+
Attributes:
celebrity_tracks:
The tracks detected from the input video, including recognized
@@ -5725,6 +5749,7 @@
"__doc__": """A generic detected landmark represented by name in string format and a
2D location.
+
Attributes:
name:
The name of this landmark, for example, left_hand,
@@ -5749,6 +5774,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Face detection annotation.
+
Attributes:
tracks:
The face tracks with attributes.
@@ -5768,6 +5794,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Person detection annotation per video.
+
Attributes:
tracks:
The detected tracks of a person.
@@ -5785,6 +5812,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Annotation results for a single video.
+
Attributes:
input_uri:
Video file location in `Cloud Storage
@@ -5859,6 +5887,7 @@
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
+
Attributes:
annotation_results:
Annotation results for all videos specified in
@@ -5877,6 +5906,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Annotation progress for a single video.
+
Attributes:
input_uri:
Video file location in `Cloud Storage
@@ -5910,6 +5940,7 @@
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
+
Attributes:
annotation_progress:
Progress metadata for all videos specified in
@@ -5928,6 +5959,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for SPEECH_TRANSCRIPTION.
+
Attributes:
language_code:
Required. *Required* The language of the supplied audio as a
@@ -5998,6 +6030,7 @@
"__doc__": """Provides “hints” to the speech recognizer to favor specific words and
phrases in the results.
+
Attributes:
phrases:
Optional. A list of strings containing words and phrases
@@ -6022,6 +6055,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """A speech recognition result corresponding to a portion of the audio.
+
Attributes:
alternatives:
May contain one or more recognition hypotheses (up to the
@@ -6048,6 +6082,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Alternative hypotheses (a.k.a. n-best list).
+
Attributes:
transcript:
Transcript text representing the words that the user spoke.
@@ -6080,6 +6115,7 @@
only included in the response when certain request parameters are set,
such as ``enable_word_time_offsets``.
+
Attributes:
start_time:
Time offset relative to the beginning of the audio, and
@@ -6123,6 +6159,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """X coordinate.
+
Attributes:
y:
Y coordinate.
@@ -6147,6 +6184,7 @@
3). Note that values can be less than 0, or greater than 1 due to
trignometric calculations for location of the box.
+
Attributes:
vertices:
Normalized vertices of the bounding polygon.
@@ -6164,6 +6202,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for text detection.
+
Attributes:
segment:
Video segment where a text snippet was detected.
@@ -6189,6 +6228,7 @@
Contains information regarding timestamp and bounding box locations
for the frames containing detected OCR text snippets.
+
Attributes:
rotated_bounding_box:
Bounding polygon of the detected text for this frame.
@@ -6210,6 +6250,7 @@
contain the corresponding text, confidence value, and frame level
information for each detection.
+
Attributes:
text:
The detected text.
@@ -6230,6 +6271,7 @@
"__doc__": """Video frame level annotations for object detection and tracking. This
field stores per frame location, time offset, and confidence.
+
Attributes:
normalized_bounding_box:
The normalized bounding box location of this object track for
@@ -6250,6 +6292,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Annotations corresponding to one tracked object.
+
Attributes:
track_info:
Different representation of tracking info in non-streaming
@@ -6290,6 +6333,7 @@
"__doc__": """Annotation corresponding to one detected, tracked and recognized logo
class.
+
Attributes:
entity:
Entity category information to specify the logo class that all
@@ -6321,6 +6365,7 @@
must only contain a ``StreamingVideoConfig`` message. All subsequent
messages must only contain ``input_content`` data.
+
Attributes:
streaming_request:
\ *Required* The streaming request, which is either a
@@ -6353,6 +6398,7 @@
"__doc__": """Provides information to the annotator that specifies how to process
the request.
+
Attributes:
streaming_config:
Config for requested annotation feature.
@@ -6391,6 +6437,7 @@
``StreamingAnnotateVideoResponse`` messages are streamed back to the
client.
+
Attributes:
error:
If set, returns a [google.rpc.Status][google.rpc.Status]
@@ -6417,6 +6464,7 @@
"__doc__": """Streaming annotation results corresponding to a portion of the video
that is currently being processed.
+
Attributes:
shot_annotations:
Shot annotation results. Each shot is represented as a video
@@ -6453,6 +6501,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for STREAMING_LABEL_DETECTION.
+
Attributes:
stationary_camera:
Whether the video has been captured from a stationary
@@ -6496,6 +6545,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+
Attributes:
model_name:
Resource name of AutoML model. Format: ``projects/{project_id}
@@ -6514,6 +6564,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for STREAMING_AUTOML_CLASSIFICATION.
+
Attributes:
model_name:
Resource name of AutoML model. Format: ``projects/{project_num
@@ -6532,6 +6583,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for STREAMING_AUTOML_OBJECT_TRACKING.
+
Attributes:
model_name:
Resource name of AutoML model. Format: ``projects/{project_id}
@@ -6550,6 +6602,7 @@
"__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2",
"__doc__": """Config for streaming storage option.
+
Attributes:
enable_storage_annotation_result:
Enable streaming storage. Default: false.
diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2_grpc.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2_grpc.py
index 3bbfc2bb..e36fd3f0 100644
--- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2_grpc.py
+++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2_grpc.py
@@ -1,4 +1,5 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.cloud.videointelligence_v1p3beta1.proto import (
@@ -11,14 +12,14 @@
class VideoIntelligenceServiceStub(object):
"""Service that implements the Video Intelligence API.
- """
+ """
def __init__(self, channel):
"""Constructor.
- Args:
- channel: A grpc.Channel.
- """
+ Args:
+ channel: A grpc.Channel.
+ """
self.AnnotateVideo = channel.unary_unary(
"/google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService/AnnotateVideo",
request_serializer=google_dot_cloud_dot_videointelligence__v1p3beta1_dot_proto_dot_video__intelligence__pb2.AnnotateVideoRequest.SerializeToString,
@@ -28,14 +29,14 @@ def __init__(self, channel):
class VideoIntelligenceServiceServicer(object):
"""Service that implements the Video Intelligence API.
- """
+ """
def AnnotateVideo(self, request, context):
"""Performs asynchronous video annotation. Progress and results can be
- retrieved through the `google.longrunning.Operations` interface.
- `Operation.metadata` contains `AnnotateVideoProgress` (progress).
- `Operation.response` contains `AnnotateVideoResponse` (results).
- """
+ retrieved through the `google.longrunning.Operations` interface.
+ `Operation.metadata` contains `AnnotateVideoProgress` (progress).
+ `Operation.response` contains `AnnotateVideoResponse` (results).
+ """
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
@@ -56,16 +57,49 @@ def add_VideoIntelligenceServiceServicer_to_server(servicer, server):
server.add_generic_rpc_handlers((generic_handler,))
+# This class is part of an EXPERIMENTAL API.
+class VideoIntelligenceService(object):
+ """Service that implements the Video Intelligence API.
+ """
+
+ @staticmethod
+ def AnnotateVideo(
+ request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ "/google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService/AnnotateVideo",
+ google_dot_cloud_dot_videointelligence__v1p3beta1_dot_proto_dot_video__intelligence__pb2.AnnotateVideoRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options,
+ channel_credentials,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ )
+
+
class StreamingVideoIntelligenceServiceStub(object):
"""Service that implements streaming Video Intelligence API.
- """
+ """
def __init__(self, channel):
"""Constructor.
- Args:
- channel: A grpc.Channel.
- """
+ Args:
+ channel: A grpc.Channel.
+ """
self.StreamingAnnotateVideo = channel.stream_stream(
"/google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService/StreamingAnnotateVideo",
request_serializer=google_dot_cloud_dot_videointelligence__v1p3beta1_dot_proto_dot_video__intelligence__pb2.StreamingAnnotateVideoRequest.SerializeToString,
@@ -75,13 +109,13 @@ def __init__(self, channel):
class StreamingVideoIntelligenceServiceServicer(object):
"""Service that implements streaming Video Intelligence API.
- """
+ """
def StreamingAnnotateVideo(self, request_iterator, context):
"""Performs video annotation with bidirectional streaming: emitting results
- while sending video/audio bytes.
- This method is only available via the gRPC API (not REST).
- """
+ while sending video/audio bytes.
+ This method is only available via the gRPC API (not REST).
+ """
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
@@ -100,3 +134,36 @@ def add_StreamingVideoIntelligenceServiceServicer_to_server(servicer, server):
rpc_method_handlers,
)
server.add_generic_rpc_handlers((generic_handler,))
+
+
+# This class is part of an EXPERIMENTAL API.
+class StreamingVideoIntelligenceService(object):
+ """Service that implements streaming Video Intelligence API.
+ """
+
+ @staticmethod
+ def StreamingAnnotateVideo(
+ request_iterator,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None,
+ ):
+ return grpc.experimental.stream_stream(
+ request_iterator,
+ target,
+ "/google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService/StreamingAnnotateVideo",
+ google_dot_cloud_dot_videointelligence__v1p3beta1_dot_proto_dot_video__intelligence__pb2.StreamingAnnotateVideoRequest.SerializeToString,
+ google_dot_cloud_dot_videointelligence__v1p3beta1_dot_proto_dot_video__intelligence__pb2.StreamingAnnotateVideoResponse.FromString,
+ options,
+ channel_credentials,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ )
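
The experimental StreamingVideoIntelligenceService helper mirrors the unary one for the bidirectional streaming method. Per the request docstring above, the first message carries only a StreamingVideoConfig and later messages carry only input_content bytes. A hypothetical sketch (the feature choice, chunk size, file name, and credentials are assumptions; authentication is omitted):

    import grpc

    from google.cloud.videointelligence_v1p3beta1.proto import (
        video_intelligence_pb2,
        video_intelligence_pb2_grpc,
    )

    def requests():
        # First message: configuration only.
        yield video_intelligence_pb2.StreamingAnnotateVideoRequest(
            video_config=video_intelligence_pb2.StreamingVideoConfig(
                feature=video_intelligence_pb2.STREAMING_LABEL_DETECTION,
            )
        )
        # Subsequent messages: raw video bytes only.
        with open("video.mp4", "rb") as media:
            chunk = media.read(64 * 1024)
            while chunk:
                yield video_intelligence_pb2.StreamingAnnotateVideoRequest(
                    input_content=chunk
                )
                chunk = media.read(64 * 1024)

    responses = video_intelligence_pb2_grpc.StreamingVideoIntelligenceService.StreamingAnnotateVideo(
        requests(),
        target="videointelligence.googleapis.com:443",
        channel_credentials=grpc.ssl_channel_credentials(),
    )
    for response in responses:
        print(response.annotation_results)
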
diff --git a/noxfile.py b/noxfile.py
index c0081b20..3c679616 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -149,12 +149,11 @@ def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
- session.install("sphinx<=3.0.0", "alabaster", "recommonmark")
+ session.install("sphinx", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
- "-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
diff --git a/synth.metadata b/synth.metadata
index 972a2af2..9f6da601 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -3,8 +3,8 @@
{
"git": {
"name": ".",
- "remote": "git@github.com:danoscarmike/python-videointelligence",
- "sha": "7613d6b7734f439dcd1e300202d6af2fd1b88514"
+ "remote": "https://github.com/googleapis/python-videointelligence.git",
+ "sha": "fc0d881254328e6c96d89fd8bf9db29646a02ea0"
}
},
{
@@ -76,5 +76,157 @@
"generator": "bazel"
}
}
+ ],
+ "generatedFiles": [
+ ".coveragerc",
+ ".flake8",
+ ".github/CONTRIBUTING.md",
+ ".github/ISSUE_TEMPLATE/bug_report.md",
+ ".github/ISSUE_TEMPLATE/feature_request.md",
+ ".github/ISSUE_TEMPLATE/support_request.md",
+ ".github/PULL_REQUEST_TEMPLATE.md",
+ ".github/release-please.yml",
+ ".github/snippet-bot.yml",
+ ".gitignore",
+ ".kokoro/build.sh",
+ ".kokoro/continuous/common.cfg",
+ ".kokoro/continuous/continuous.cfg",
+ ".kokoro/docker/docs/Dockerfile",
+ ".kokoro/docker/docs/fetch_gpg_keys.sh",
+ ".kokoro/docs/common.cfg",
+ ".kokoro/docs/docs-presubmit.cfg",
+ ".kokoro/docs/docs.cfg",
+ ".kokoro/populate-secrets.sh",
+ ".kokoro/presubmit/common.cfg",
+ ".kokoro/presubmit/presubmit.cfg",
+ ".kokoro/publish-docs.sh",
+ ".kokoro/release.sh",
+ ".kokoro/release/common.cfg",
+ ".kokoro/release/release.cfg",
+ ".kokoro/samples/lint/common.cfg",
+ ".kokoro/samples/lint/continuous.cfg",
+ ".kokoro/samples/lint/periodic.cfg",
+ ".kokoro/samples/lint/presubmit.cfg",
+ ".kokoro/samples/python3.6/common.cfg",
+ ".kokoro/samples/python3.6/continuous.cfg",
+ ".kokoro/samples/python3.6/periodic.cfg",
+ ".kokoro/samples/python3.6/presubmit.cfg",
+ ".kokoro/samples/python3.7/common.cfg",
+ ".kokoro/samples/python3.7/continuous.cfg",
+ ".kokoro/samples/python3.7/periodic.cfg",
+ ".kokoro/samples/python3.7/presubmit.cfg",
+ ".kokoro/samples/python3.8/common.cfg",
+ ".kokoro/samples/python3.8/continuous.cfg",
+ ".kokoro/samples/python3.8/periodic.cfg",
+ ".kokoro/samples/python3.8/presubmit.cfg",
+ ".kokoro/test-samples.sh",
+ ".kokoro/trampoline.sh",
+ ".kokoro/trampoline_v2.sh",
+ ".trampolinerc",
+ "CODE_OF_CONDUCT.md",
+ "CONTRIBUTING.rst",
+ "LICENSE",
+ "MANIFEST.in",
+ "docs/_static/custom.css",
+ "docs/_templates/layout.html",
+ "docs/conf.py",
+ "docs/gapic/v1/api.rst",
+ "docs/gapic/v1/types.rst",
+ "docs/gapic/v1beta2/api.rst",
+ "docs/gapic/v1beta2/types.rst",
+ "docs/gapic/v1p1beta1/api.rst",
+ "docs/gapic/v1p1beta1/types.rst",
+ "docs/gapic/v1p2beta1/api.rst",
+ "docs/gapic/v1p2beta1/types.rst",
+ "docs/gapic/v1p3beta1/api.rst",
+ "docs/gapic/v1p3beta1/types.rst",
+ "docs/multiprocessing.rst",
+ "google/__init__.py",
+ "google/cloud/__init__.py",
+ "google/cloud/videointelligence.py",
+ "google/cloud/videointelligence_v1/__init__.py",
+ "google/cloud/videointelligence_v1/gapic/__init__.py",
+ "google/cloud/videointelligence_v1/gapic/enums.py",
+ "google/cloud/videointelligence_v1/gapic/transports/__init__.py",
+ "google/cloud/videointelligence_v1/gapic/transports/video_intelligence_service_grpc_transport.py",
+ "google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py",
+ "google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py",
+ "google/cloud/videointelligence_v1/proto/__init__.py",
+ "google/cloud/videointelligence_v1/proto/video_intelligence.proto",
+ "google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py",
+ "google/cloud/videointelligence_v1/proto/video_intelligence_pb2_grpc.py",
+ "google/cloud/videointelligence_v1/types.py",
+ "google/cloud/videointelligence_v1beta2/__init__.py",
+ "google/cloud/videointelligence_v1beta2/gapic/__init__.py",
+ "google/cloud/videointelligence_v1beta2/gapic/enums.py",
+ "google/cloud/videointelligence_v1beta2/gapic/transports/__init__.py",
+ "google/cloud/videointelligence_v1beta2/gapic/transports/video_intelligence_service_grpc_transport.py",
+ "google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py",
+ "google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client_config.py",
+ "google/cloud/videointelligence_v1beta2/proto/__init__.py",
+ "google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto",
+ "google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py",
+ "google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2_grpc.py",
+ "google/cloud/videointelligence_v1beta2/types.py",
+ "google/cloud/videointelligence_v1p1beta1/__init__.py",
+ "google/cloud/videointelligence_v1p1beta1/gapic/__init__.py",
+ "google/cloud/videointelligence_v1p1beta1/gapic/enums.py",
+ "google/cloud/videointelligence_v1p1beta1/gapic/transports/__init__.py",
+ "google/cloud/videointelligence_v1p1beta1/gapic/transports/video_intelligence_service_grpc_transport.py",
+ "google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client.py",
+ "google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client_config.py",
+ "google/cloud/videointelligence_v1p1beta1/proto/__init__.py",
+ "google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto",
+ "google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py",
+ "google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2_grpc.py",
+ "google/cloud/videointelligence_v1p1beta1/types.py",
+ "google/cloud/videointelligence_v1p2beta1/__init__.py",
+ "google/cloud/videointelligence_v1p2beta1/gapic/__init__.py",
+ "google/cloud/videointelligence_v1p2beta1/gapic/enums.py",
+ "google/cloud/videointelligence_v1p2beta1/gapic/transports/__init__.py",
+ "google/cloud/videointelligence_v1p2beta1/gapic/transports/video_intelligence_service_grpc_transport.py",
+ "google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client.py",
+ "google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client_config.py",
+ "google/cloud/videointelligence_v1p2beta1/proto/__init__.py",
+ "google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto",
+ "google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py",
+ "google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2_grpc.py",
+ "google/cloud/videointelligence_v1p2beta1/types.py",
+ "google/cloud/videointelligence_v1p3beta1/__init__.py",
+ "google/cloud/videointelligence_v1p3beta1/gapic/__init__.py",
+ "google/cloud/videointelligence_v1p3beta1/gapic/enums.py",
+ "google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client.py",
+ "google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client_config.py",
+ "google/cloud/videointelligence_v1p3beta1/gapic/transports/__init__.py",
+ "google/cloud/videointelligence_v1p3beta1/gapic/transports/streaming_video_intelligence_service_grpc_transport.py",
+ "google/cloud/videointelligence_v1p3beta1/gapic/transports/video_intelligence_service_grpc_transport.py",
+ "google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py",
+ "google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client_config.py",
+ "google/cloud/videointelligence_v1p3beta1/proto/__init__.py",
+ "google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto",
+ "google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py",
+ "google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2_grpc.py",
+ "google/cloud/videointelligence_v1p3beta1/types.py",
+ "noxfile.py",
+ "renovate.json",
+ "samples/AUTHORING_GUIDE.md",
+ "samples/CONTRIBUTING.md",
+ "samples/analyze/README.rst",
+ "samples/analyze/noxfile.py",
+ "scripts/decrypt-secrets.sh",
+ "scripts/readme-gen/readme_gen.py",
+ "scripts/readme-gen/templates/README.tmpl.rst",
+ "scripts/readme-gen/templates/auth.tmpl.rst",
+ "scripts/readme-gen/templates/auth_api_key.tmpl.rst",
+ "scripts/readme-gen/templates/install_deps.tmpl.rst",
+ "scripts/readme-gen/templates/install_portaudio.tmpl.rst",
+ "setup.cfg",
+ "testing/.gitignore",
+ "tests/unit/gapic/v1/test_video_intelligence_service_client_v1.py",
+ "tests/unit/gapic/v1beta2/test_video_intelligence_service_client_v1beta2.py",
+ "tests/unit/gapic/v1p1beta1/test_video_intelligence_service_client_v1p1beta1.py",
+ "tests/unit/gapic/v1p2beta1/test_video_intelligence_service_client_v1p2beta1.py",
+ "tests/unit/gapic/v1p3beta1/test_streaming_video_intelligence_service_client_v1p3beta1.py",
+ "tests/unit/gapic/v1p3beta1/test_video_intelligence_service_client_v1p3beta1.py"
]
}
\ No newline at end of file