diff --git a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/stub/VideoIntelligenceServiceStubSettings.java b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/stub/VideoIntelligenceServiceStubSettings.java index 8335fa0b8..ecae6140b 100644 --- a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/stub/VideoIntelligenceServiceStubSettings.java +++ b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/stub/VideoIntelligenceServiceStubSettings.java @@ -209,12 +209,12 @@ public static class Builder RetrySettings settings = null; settings = RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(100L)) - .setRetryDelayMultiplier(1.3) - .setMaxRetryDelay(Duration.ofMillis(60000L)) - .setInitialRpcTimeout(Duration.ofMillis(20000L)) + .setInitialRetryDelay(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(2.5) + .setMaxRetryDelay(Duration.ofMillis(120000L)) + .setInitialRpcTimeout(Duration.ofMillis(120000L)) .setRpcTimeoutMultiplier(1.0) - .setMaxRpcTimeout(Duration.ofMillis(20000L)) + .setMaxRpcTimeout(Duration.ofMillis(120000L)) .setTotalTimeout(Duration.ofMillis(600000L)) .build(); definitions.put("default", settings); @@ -251,14 +251,14 @@ private static Builder initDefaults(Builder builder) { builder .annotateVideoSettings() - .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); builder .annotateVideoOperationSettings() .setInitialCallSettings( UnaryCallSettings .newUnaryCallSettingsBuilder() - .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")) .build()) .setResponseTransformer( diff --git 
a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoIntelligenceServiceClient.java b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoIntelligenceServiceClient.java index 4245e1c60..d8910ed5e 100644 --- a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoIntelligenceServiceClient.java +++ b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoIntelligenceServiceClient.java @@ -26,7 +26,7 @@ // AUTO-GENERATED DOCUMENTATION AND SERVICE /** - * Service Description: Service that implements streaming Google Cloud Video Intelligence API. + * Service Description: Service that implements streaming Video Intelligence API. * *

This class provides the ability to make remote calls to the backing service through method * calls that map to API methods. Sample code to get started: diff --git a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceClient.java b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceClient.java index 0a21f6873..30c66159e 100644 --- a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceClient.java +++ b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceClient.java @@ -31,7 +31,7 @@ // AUTO-GENERATED DOCUMENTATION AND SERVICE /** - * Service Description: Service that implements Google Cloud Video Intelligence API. + * Service Description: Service that implements the Video Intelligence API. * *

This class provides the ability to make remote calls to the backing service through method * calls that map to API methods. Sample code to get started: @@ -185,15 +185,15 @@ public final OperationsClient getOperationsClient() { * } * * - * @param inputUri Input video location. Currently, only [Google Cloud - * Storage](https://cloud.google.com/storage/) URIs are supported, which must be specified in + * @param inputUri Input video location. Currently, only [Cloud + * Storage](https://cloud.google.com/storage/) URIs are supported. URIs must be specified in * the following format: `gs://bucket-id/object-id` (other URI formats return * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more - * information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A - * video URI may include wildcards in `object-id`, and thus identify multiple videos. + * information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + * To identify multiple videos, a video URI may include wildcards in the `object-id`. * Supported wildcards: '*' to match 0 or more characters; '?' to match 1 character. If * unset, the input video should be embedded in the request as `input_content`. If set, - * `input_content` should be unset. + * `input_content` must be unset. * @param features Required. Requested video annotation features. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ diff --git a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/package-info.java b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/package-info.java index f905dcc90..ccd8d2c8b 100644 --- a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/package-info.java +++ b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1p3beta1/package-info.java @@ -21,7 +21,7 @@ * *

============================== VideoIntelligenceServiceClient ============================== * - *

Service Description: Service that implements Google Cloud Video Intelligence API. + *

Service Description: Service that implements the Video Intelligence API. * *

Sample for VideoIntelligenceServiceClient: * @@ -39,7 +39,7 @@ * ======================================= StreamingVideoIntelligenceServiceClient * ======================================= * - *

Service Description: Service that implements streaming Google Cloud Video Intelligence API. + *

Service Description: Service that implements streaming Video Intelligence API. * *

Sample for StreamingVideoIntelligenceServiceClient: * diff --git a/grpc-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoIntelligenceServiceGrpc.java b/grpc-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoIntelligenceServiceGrpc.java index 161b9566a..fe7713169 100644 --- a/grpc-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoIntelligenceServiceGrpc.java +++ b/grpc-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoIntelligenceServiceGrpc.java @@ -24,7 +24,7 @@ * * *

- * Service that implements streaming Google Cloud Video Intelligence API.
+ * Service that implements streaming Video Intelligence API.
  * 
*/ @javax.annotation.Generated( @@ -140,7 +140,7 @@ public StreamingVideoIntelligenceServiceFutureStub newStub( * * *
-   * Service that implements streaming Google Cloud Video Intelligence API.
+   * Service that implements streaming Video Intelligence API.
    * 
*/ public abstract static class StreamingVideoIntelligenceServiceImplBase @@ -182,7 +182,7 @@ public final io.grpc.ServerServiceDefinition bindService() { * * *
-   * Service that implements streaming Google Cloud Video Intelligence API.
+   * Service that implements streaming Video Intelligence API.
    * 
*/ public static final class StreamingVideoIntelligenceServiceStub @@ -223,7 +223,7 @@ protected StreamingVideoIntelligenceServiceStub build( * * *
-   * Service that implements streaming Google Cloud Video Intelligence API.
+   * Service that implements streaming Video Intelligence API.
    * 
*/ public static final class StreamingVideoIntelligenceServiceBlockingStub @@ -244,7 +244,7 @@ protected StreamingVideoIntelligenceServiceBlockingStub build( * * *
-   * Service that implements streaming Google Cloud Video Intelligence API.
+   * Service that implements streaming Video Intelligence API.
    * 
*/ public static final class StreamingVideoIntelligenceServiceFutureStub diff --git a/grpc-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceGrpc.java b/grpc-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceGrpc.java index 4bf1f7d4d..7922b43be 100644 --- a/grpc-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceGrpc.java +++ b/grpc-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceGrpc.java @@ -26,7 +26,7 @@ * * *
- * Service that implements Google Cloud Video Intelligence API.
+ * Service that implements the Video Intelligence API.
  * 
*/ @javax.annotation.Generated( @@ -132,7 +132,7 @@ public VideoIntelligenceServiceFutureStub newStub( * * *
-   * Service that implements Google Cloud Video Intelligence API.
+   * Service that implements the Video Intelligence API.
    * 
*/ public abstract static class VideoIntelligenceServiceImplBase implements io.grpc.BindableService { @@ -170,7 +170,7 @@ public final io.grpc.ServerServiceDefinition bindService() { * * *
-   * Service that implements Google Cloud Video Intelligence API.
+   * Service that implements the Video Intelligence API.
    * 
*/ public static final class VideoIntelligenceServiceStub @@ -209,7 +209,7 @@ public void annotateVideo( * * *
-   * Service that implements Google Cloud Video Intelligence API.
+   * Service that implements the Video Intelligence API.
    * 
*/ public static final class VideoIntelligenceServiceBlockingStub @@ -245,7 +245,7 @@ public com.google.longrunning.Operation annotateVideo( * * *
-   * Service that implements Google Cloud Video Intelligence API.
+   * Service that implements the Video Intelligence API.
    * 
*/ public static final class VideoIntelligenceServiceFutureStub diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/AnnotateVideoRequest.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/AnnotateVideoRequest.java index c2e35ffbc..04eb66715 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/AnnotateVideoRequest.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/AnnotateVideoRequest.java @@ -187,15 +187,16 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * Input video location. Currently, only
-   * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-   * supported, which must be specified in the following format:
+   * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+   * supported. URIs must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
    * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-   * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
-   * URI may include wildcards in `object-id`, and thus identify multiple
-   * videos. Supported wildcards: '*' to match 0 or more characters;
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+   * multiple videos, a video URI may include wildcards in the `object-id`.
+   * Supported wildcards: '*' to match 0 or more characters;
    * '?' to match 1 character. If unset, the input video should be embedded
-   * in the request as `input_content`. If set, `input_content` should be unset.
+   * in the request as `input_content`. If set, `input_content` must be unset.
    * 
* * string input_uri = 1; @@ -218,15 +219,16 @@ public java.lang.String getInputUri() { * *
    * Input video location. Currently, only
-   * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-   * supported, which must be specified in the following format:
+   * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+   * supported. URIs must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
    * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-   * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
-   * URI may include wildcards in `object-id`, and thus identify multiple
-   * videos. Supported wildcards: '*' to match 0 or more characters;
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+   * multiple videos, a video URI may include wildcards in the `object-id`.
+   * Supported wildcards: '*' to match 0 or more characters;
    * '?' to match 1 character. If unset, the input video should be embedded
-   * in the request as `input_content`. If set, `input_content` should be unset.
+   * in the request as `input_content`. If set, `input_content` must be unset.
    * 
* * string input_uri = 1; @@ -252,8 +254,8 @@ public com.google.protobuf.ByteString getInputUriBytes() { * *
    * The video data bytes.
-   * If unset, the input video(s) should be specified via `input_uri`.
-   * If set, `input_uri` should be unset.
+   * If unset, the input video(s) should be specified via the `input_uri`.
+   * If set, `input_uri` must be unset.
    * 
* * bytes input_content = 6; @@ -421,11 +423,12 @@ public com.google.cloud.videointelligence.v1p3beta1.VideoContext getVideoContext * *
    * Optional. Location where the output (in JSON format) should be stored.
-   * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-   * URIs are supported, which must be specified in the following format:
+   * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+   * URIs are supported. These must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
    * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-   * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints).
    * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -448,11 +451,12 @@ public java.lang.String getOutputUri() { * *
    * Optional. Location where the output (in JSON format) should be stored.
-   * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-   * URIs are supported, which must be specified in the following format:
+   * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+   * URIs are supported. These must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
    * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-   * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints).
    * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -478,8 +482,9 @@ public com.google.protobuf.ByteString getOutputUriBytes() { * *
    * Optional. Cloud region where annotation should take place. Supported cloud
-   * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-   * is specified, a region will be determined based on video file location.
+   * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+   * region is specified, the region will be determined based on video file
+   * location.
    * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -502,8 +507,9 @@ public java.lang.String getLocationId() { * *
    * Optional. Cloud region where annotation should take place. Supported cloud
-   * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-   * is specified, a region will be determined based on video file location.
+   * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+   * region is specified, the region will be determined based on video file
+   * location.
    * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -973,15 +979,16 @@ public Builder mergeFrom( * *
      * Input video location. Currently, only
-     * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-     * supported, which must be specified in the following format:
+     * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+     * supported. URIs must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
      * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-     * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
-     * URI may include wildcards in `object-id`, and thus identify multiple
-     * videos. Supported wildcards: '*' to match 0 or more characters;
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+     * multiple videos, a video URI may include wildcards in the `object-id`.
+     * Supported wildcards: '*' to match 0 or more characters;
      * '?' to match 1 character. If unset, the input video should be embedded
-     * in the request as `input_content`. If set, `input_content` should be unset.
+     * in the request as `input_content`. If set, `input_content` must be unset.
      * 
* * string input_uri = 1; @@ -1004,15 +1011,16 @@ public java.lang.String getInputUri() { * *
      * Input video location. Currently, only
-     * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-     * supported, which must be specified in the following format:
+     * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+     * supported. URIs must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
      * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-     * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
-     * URI may include wildcards in `object-id`, and thus identify multiple
-     * videos. Supported wildcards: '*' to match 0 or more characters;
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+     * multiple videos, a video URI may include wildcards in the `object-id`.
+     * Supported wildcards: '*' to match 0 or more characters;
      * '?' to match 1 character. If unset, the input video should be embedded
-     * in the request as `input_content`. If set, `input_content` should be unset.
+     * in the request as `input_content`. If set, `input_content` must be unset.
      * 
* * string input_uri = 1; @@ -1035,15 +1043,16 @@ public com.google.protobuf.ByteString getInputUriBytes() { * *
      * Input video location. Currently, only
-     * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-     * supported, which must be specified in the following format:
+     * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+     * supported. URIs must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
      * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-     * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
-     * URI may include wildcards in `object-id`, and thus identify multiple
-     * videos. Supported wildcards: '*' to match 0 or more characters;
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+     * multiple videos, a video URI may include wildcards in the `object-id`.
+     * Supported wildcards: '*' to match 0 or more characters;
      * '?' to match 1 character. If unset, the input video should be embedded
-     * in the request as `input_content`. If set, `input_content` should be unset.
+     * in the request as `input_content`. If set, `input_content` must be unset.
      * 
* * string input_uri = 1; @@ -1065,15 +1074,16 @@ public Builder setInputUri(java.lang.String value) { * *
      * Input video location. Currently, only
-     * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-     * supported, which must be specified in the following format:
+     * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+     * supported. URIs must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
      * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-     * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
-     * URI may include wildcards in `object-id`, and thus identify multiple
-     * videos. Supported wildcards: '*' to match 0 or more characters;
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+     * multiple videos, a video URI may include wildcards in the `object-id`.
+     * Supported wildcards: '*' to match 0 or more characters;
      * '?' to match 1 character. If unset, the input video should be embedded
-     * in the request as `input_content`. If set, `input_content` should be unset.
+     * in the request as `input_content`. If set, `input_content` must be unset.
      * 
* * string input_uri = 1; @@ -1091,15 +1101,16 @@ public Builder clearInputUri() { * *
      * Input video location. Currently, only
-     * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-     * supported, which must be specified in the following format:
+     * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+     * supported. URIs must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
      * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-     * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
-     * URI may include wildcards in `object-id`, and thus identify multiple
-     * videos. Supported wildcards: '*' to match 0 or more characters;
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+     * multiple videos, a video URI may include wildcards in the `object-id`.
+     * Supported wildcards: '*' to match 0 or more characters;
      * '?' to match 1 character. If unset, the input video should be embedded
-     * in the request as `input_content`. If set, `input_content` should be unset.
+     * in the request as `input_content`. If set, `input_content` must be unset.
      * 
* * string input_uri = 1; @@ -1124,8 +1135,8 @@ public Builder setInputUriBytes(com.google.protobuf.ByteString value) { * *
      * The video data bytes.
-     * If unset, the input video(s) should be specified via `input_uri`.
-     * If set, `input_uri` should be unset.
+     * If unset, the input video(s) should be specified via the `input_uri`.
+     * If set, `input_uri` must be unset.
      * 
* * bytes input_content = 6; @@ -1140,8 +1151,8 @@ public com.google.protobuf.ByteString getInputContent() { * *
      * The video data bytes.
-     * If unset, the input video(s) should be specified via `input_uri`.
-     * If set, `input_uri` should be unset.
+     * If unset, the input video(s) should be specified via the `input_uri`.
+     * If set, `input_uri` must be unset.
      * 
* * bytes input_content = 6; @@ -1163,8 +1174,8 @@ public Builder setInputContent(com.google.protobuf.ByteString value) { * *
      * The video data bytes.
-     * If unset, the input video(s) should be specified via `input_uri`.
-     * If set, `input_uri` should be unset.
+     * If unset, the input video(s) should be specified via the `input_uri`.
+     * If set, `input_uri` must be unset.
      * 
* * bytes input_content = 6; @@ -1620,11 +1631,12 @@ public Builder clearVideoContext() { * *
      * Optional. Location where the output (in JSON format) should be stored.
-     * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-     * URIs are supported, which must be specified in the following format:
+     * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+     * URIs are supported. These must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
      * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-     * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints).
      * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -1647,11 +1659,12 @@ public java.lang.String getOutputUri() { * *
      * Optional. Location where the output (in JSON format) should be stored.
-     * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-     * URIs are supported, which must be specified in the following format:
+     * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+     * URIs are supported. These must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
      * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-     * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints).
      * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -1674,11 +1687,12 @@ public com.google.protobuf.ByteString getOutputUriBytes() { * *
      * Optional. Location where the output (in JSON format) should be stored.
-     * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-     * URIs are supported, which must be specified in the following format:
+     * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+     * URIs are supported. These must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
      * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-     * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints).
      * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -1700,11 +1714,12 @@ public Builder setOutputUri(java.lang.String value) { * *
      * Optional. Location where the output (in JSON format) should be stored.
-     * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-     * URIs are supported, which must be specified in the following format:
+     * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+     * URIs are supported. These must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
      * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-     * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints).
      * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -1722,11 +1737,12 @@ public Builder clearOutputUri() { * *
      * Optional. Location where the output (in JSON format) should be stored.
-     * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-     * URIs are supported, which must be specified in the following format:
+     * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+     * URIs are supported. These must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
      * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-     * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints).
      * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -1751,8 +1767,9 @@ public Builder setOutputUriBytes(com.google.protobuf.ByteString value) { * *
      * Optional. Cloud region where annotation should take place. Supported cloud
-     * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-     * is specified, a region will be determined based on video file location.
+     * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+     * region is specified, the region will be determined based on video file
+     * location.
      * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1775,8 +1792,9 @@ public java.lang.String getLocationId() { * *
      * Optional. Cloud region where annotation should take place. Supported cloud
-     * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-     * is specified, a region will be determined based on video file location.
+     * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+     * region is specified, the region will be determined based on video file
+     * location.
      * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1799,8 +1817,9 @@ public com.google.protobuf.ByteString getLocationIdBytes() { * *
      * Optional. Cloud region where annotation should take place. Supported cloud
-     * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-     * is specified, a region will be determined based on video file location.
+     * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+     * region is specified, the region will be determined based on video file
+     * location.
      * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1822,8 +1841,9 @@ public Builder setLocationId(java.lang.String value) { * *
      * Optional. Cloud region where annotation should take place. Supported cloud
-     * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-     * is specified, a region will be determined based on video file location.
+     * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+     * region is specified, the region will be determined based on video file
+     * location.
      * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1841,8 +1861,9 @@ public Builder clearLocationId() { * *
      * Optional. Cloud region where annotation should take place. Supported cloud
-     * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-     * is specified, a region will be determined based on video file location.
+     * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+     * region is specified, the region will be determined based on video file
+     * location.
      * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/AnnotateVideoRequestOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/AnnotateVideoRequestOrBuilder.java index e06de7ca7..725f2fd07 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/AnnotateVideoRequestOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/AnnotateVideoRequestOrBuilder.java @@ -28,15 +28,16 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Input video location. Currently, only
-   * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-   * supported, which must be specified in the following format:
+   * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+   * supported. URIs must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
    * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-   * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
-   * URI may include wildcards in `object-id`, and thus identify multiple
-   * videos. Supported wildcards: '*' to match 0 or more characters;
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+   * multiple videos, a video URI may include wildcards in the `object-id`.
+   * Supported wildcards: '*' to match 0 or more characters;
    * '?' to match 1 character. If unset, the input video should be embedded
-   * in the request as `input_content`. If set, `input_content` should be unset.
+   * in the request as `input_content`. If set, `input_content` must be unset.
    * 
* * string input_uri = 1; @@ -49,15 +50,16 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Input video location. Currently, only
-   * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-   * supported, which must be specified in the following format:
+   * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+   * supported. URIs must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
    * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-   * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
-   * URI may include wildcards in `object-id`, and thus identify multiple
-   * videos. Supported wildcards: '*' to match 0 or more characters;
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+   * multiple videos, a video URI may include wildcards in the `object-id`.
+   * Supported wildcards: '*' to match 0 or more characters;
    * '?' to match 1 character. If unset, the input video should be embedded
-   * in the request as `input_content`. If set, `input_content` should be unset.
+   * in the request as `input_content`. If set, `input_content` must be unset.
    * 
* * string input_uri = 1; @@ -71,8 +73,8 @@ public interface AnnotateVideoRequestOrBuilder * *
    * The video data bytes.
-   * If unset, the input video(s) should be specified via `input_uri`.
-   * If set, `input_uri` should be unset.
+   * If unset, the input video(s) should be specified via the `input_uri`.
+   * If set, `input_uri` must be unset.
    * 
* * bytes input_content = 6; @@ -194,11 +196,12 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Optional. Location where the output (in JSON format) should be stored.
-   * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-   * URIs are supported, which must be specified in the following format:
+   * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+   * URIs are supported. These must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
    * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-   * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints).
    * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -211,11 +214,12 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Optional. Location where the output (in JSON format) should be stored.
-   * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-   * URIs are supported, which must be specified in the following format:
+   * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+   * URIs are supported. These must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
    * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-   * more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints).
    * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -229,8 +233,9 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Optional. Cloud region where annotation should take place. Supported cloud
-   * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-   * is specified, a region will be determined based on video file location.
+   * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+   * region is specified, the region will be determined based on video file
+   * location.
    * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -243,8 +248,9 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Optional. Cloud region where annotation should take place. Supported cloud
-   * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-   * is specified, a region will be determined based on video file location.
+   * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+   * region is specified, the region will be determined based on video file
+   * location.
    * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedAttribute.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedAttribute.java index 775a36999..32782dcce 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedAttribute.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedAttribute.java @@ -130,7 +130,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+   * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
    * A full list of supported type names will be provided in the document.
    * 
* @@ -153,7 +153,7 @@ public java.lang.String getName() { * * *
-   * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+   * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
    * A full list of supported type names will be provided in the document.
    * 
* @@ -598,7 +598,7 @@ public Builder mergeFrom( * * *
-     * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+     * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
      * A full list of supported type names will be provided in the document.
      * 
* @@ -621,7 +621,7 @@ public java.lang.String getName() { * * *
-     * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+     * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
      * A full list of supported type names will be provided in the document.
      * 
* @@ -644,7 +644,7 @@ public com.google.protobuf.ByteString getNameBytes() { * * *
-     * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+     * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
      * A full list of supported type names will be provided in the document.
      * 
* @@ -666,7 +666,7 @@ public Builder setName(java.lang.String value) { * * *
-     * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+     * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
      * A full list of supported type names will be provided in the document.
      * 
* @@ -684,7 +684,7 @@ public Builder clearName() { * * *
-     * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+     * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
      * A full list of supported type names will be provided in the document.
      * 
* diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedAttributeOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedAttributeOrBuilder.java index f7b2a6477..e996cda08 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedAttributeOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedAttributeOrBuilder.java @@ -27,7 +27,7 @@ public interface DetectedAttributeOrBuilder * * *
-   * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+   * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
    * A full list of supported type names will be provided in the document.
    * 
* @@ -40,7 +40,7 @@ public interface DetectedAttributeOrBuilder * * *
-   * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+   * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
    * A full list of supported type names will be provided in the document.
    * 
* diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedLandmark.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedLandmark.java index 1f97cd3ff..3ba4e013d 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedLandmark.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedLandmark.java @@ -141,7 +141,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * The name of this landmark, i.e. left_hand, right_shoulder.
+   * The name of this landmark, for example, left_hand, right_shoulder.
    * 
* * string name = 1; @@ -163,7 +163,7 @@ public java.lang.String getName() { * * *
-   * The name of this landmark, i.e. left_hand, right_shoulder.
+   * The name of this landmark, for example, left_hand, right_shoulder.
    * 
* * string name = 1; @@ -620,7 +620,7 @@ public Builder mergeFrom( * * *
-     * The name of this landmark, i.e. left_hand, right_shoulder.
+     * The name of this landmark, for example, left_hand, right_shoulder.
      * 
* * string name = 1; @@ -642,7 +642,7 @@ public java.lang.String getName() { * * *
-     * The name of this landmark, i.e. left_hand, right_shoulder.
+     * The name of this landmark, for example, left_hand, right_shoulder.
      * 
* * string name = 1; @@ -664,7 +664,7 @@ public com.google.protobuf.ByteString getNameBytes() { * * *
-     * The name of this landmark, i.e. left_hand, right_shoulder.
+     * The name of this landmark, for example, left_hand, right_shoulder.
      * 
* * string name = 1; @@ -685,7 +685,7 @@ public Builder setName(java.lang.String value) { * * *
-     * The name of this landmark, i.e. left_hand, right_shoulder.
+     * The name of this landmark, for example, left_hand, right_shoulder.
      * 
* * string name = 1; @@ -702,7 +702,7 @@ public Builder clearName() { * * *
-     * The name of this landmark, i.e. left_hand, right_shoulder.
+     * The name of this landmark, for example, left_hand, right_shoulder.
      * 
* * string name = 1; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedLandmarkOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedLandmarkOrBuilder.java index 7a12f94b4..c290b666e 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedLandmarkOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/DetectedLandmarkOrBuilder.java @@ -27,7 +27,7 @@ public interface DetectedLandmarkOrBuilder * * *
-   * The name of this landmark, i.e. left_hand, right_shoulder.
+   * The name of this landmark, for example, left_hand, right_shoulder.
    * 
* * string name = 1; @@ -39,7 +39,7 @@ public interface DetectedLandmarkOrBuilder * * *
-   * The name of this landmark, i.e. left_hand, right_shoulder.
+   * The name of this landmark, for example, left_hand, right_shoulder.
    * 
* * string name = 1; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Entity.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Entity.java index dbf8308b4..0f040541e 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Entity.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Entity.java @@ -184,7 +184,7 @@ public com.google.protobuf.ByteString getEntityIdBytes() { * * *
-   * Textual description, e.g. `Fixed-gear bicycle`.
+   * Textual description, e.g., `Fixed-gear bicycle`.
    * 
* * string description = 2; @@ -206,7 +206,7 @@ public java.lang.String getDescription() { * * *
-   * Textual description, e.g. `Fixed-gear bicycle`.
+   * Textual description, e.g., `Fixed-gear bicycle`.
    * 
* * string description = 2; @@ -743,7 +743,7 @@ public Builder setEntityIdBytes(com.google.protobuf.ByteString value) { * * *
-     * Textual description, e.g. `Fixed-gear bicycle`.
+     * Textual description, e.g., `Fixed-gear bicycle`.
      * 
* * string description = 2; @@ -765,7 +765,7 @@ public java.lang.String getDescription() { * * *
-     * Textual description, e.g. `Fixed-gear bicycle`.
+     * Textual description, e.g., `Fixed-gear bicycle`.
      * 
* * string description = 2; @@ -787,7 +787,7 @@ public com.google.protobuf.ByteString getDescriptionBytes() { * * *
-     * Textual description, e.g. `Fixed-gear bicycle`.
+     * Textual description, e.g., `Fixed-gear bicycle`.
      * 
* * string description = 2; @@ -808,7 +808,7 @@ public Builder setDescription(java.lang.String value) { * * *
-     * Textual description, e.g. `Fixed-gear bicycle`.
+     * Textual description, e.g., `Fixed-gear bicycle`.
      * 
* * string description = 2; @@ -825,7 +825,7 @@ public Builder clearDescription() { * * *
-     * Textual description, e.g. `Fixed-gear bicycle`.
+     * Textual description, e.g., `Fixed-gear bicycle`.
      * 
* * string description = 2; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/EntityOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/EntityOrBuilder.java index 42da9620b..714ef9db4 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/EntityOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/EntityOrBuilder.java @@ -56,7 +56,7 @@ public interface EntityOrBuilder * * *
-   * Textual description, e.g. `Fixed-gear bicycle`.
+   * Textual description, e.g., `Fixed-gear bicycle`.
    * 
* * string description = 2; @@ -68,7 +68,7 @@ public interface EntityOrBuilder * * *
-   * Textual description, e.g. `Fixed-gear bicycle`.
+   * Textual description, e.g., `Fixed-gear bicycle`.
    * 
* * string description = 2; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/FaceDetectionConfig.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/FaceDetectionConfig.java index 715524bf4..3007cc6be 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/FaceDetectionConfig.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/FaceDetectionConfig.java @@ -178,7 +178,7 @@ public com.google.protobuf.ByteString getModelBytes() { * * *
-   * Whether bounding boxes be included in the face annotation output.
+   * Whether bounding boxes are included in the face annotation output.
    * 
* * bool include_bounding_boxes = 2; @@ -196,7 +196,7 @@ public boolean getIncludeBoundingBoxes() { * *
    * Whether to enable face attributes detection, such as glasses, dark_glasses,
-   * mouth_open etc. Ignored if 'include_bounding_boxes' is false.
+   * mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
    * 
* * bool include_attributes = 5; @@ -681,7 +681,7 @@ public Builder setModelBytes(com.google.protobuf.ByteString value) { * * *
-     * Whether bounding boxes be included in the face annotation output.
+     * Whether bounding boxes are included in the face annotation output.
      * 
* * bool include_bounding_boxes = 2; @@ -695,7 +695,7 @@ public boolean getIncludeBoundingBoxes() { * * *
-     * Whether bounding boxes be included in the face annotation output.
+     * Whether bounding boxes are included in the face annotation output.
      * 
* * bool include_bounding_boxes = 2; @@ -713,7 +713,7 @@ public Builder setIncludeBoundingBoxes(boolean value) { * * *
-     * Whether bounding boxes be included in the face annotation output.
+     * Whether bounding boxes are included in the face annotation output.
      * 
* * bool include_bounding_boxes = 2; @@ -733,7 +733,7 @@ public Builder clearIncludeBoundingBoxes() { * *
      * Whether to enable face attributes detection, such as glasses, dark_glasses,
-     * mouth_open etc. Ignored if 'include_bounding_boxes' is false.
+     * mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
      * 
* * bool include_attributes = 5; @@ -748,7 +748,7 @@ public boolean getIncludeAttributes() { * *
      * Whether to enable face attributes detection, such as glasses, dark_glasses,
-     * mouth_open etc. Ignored if 'include_bounding_boxes' is false.
+     * mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
      * 
* * bool include_attributes = 5; @@ -767,7 +767,7 @@ public Builder setIncludeAttributes(boolean value) { * *
      * Whether to enable face attributes detection, such as glasses, dark_glasses,
-     * mouth_open etc. Ignored if 'include_bounding_boxes' is false.
+     * mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
      * 
* * bool include_attributes = 5; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/FaceDetectionConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/FaceDetectionConfigOrBuilder.java index 58580470f..4b0c8e321 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/FaceDetectionConfigOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/FaceDetectionConfigOrBuilder.java @@ -56,7 +56,7 @@ public interface FaceDetectionConfigOrBuilder * * *
-   * Whether bounding boxes be included in the face annotation output.
+   * Whether bounding boxes are included in the face annotation output.
    * 
* * bool include_bounding_boxes = 2; @@ -70,7 +70,7 @@ public interface FaceDetectionConfigOrBuilder * *
    * Whether to enable face attributes detection, such as glasses, dark_glasses,
-   * mouth_open etc. Ignored if 'include_bounding_boxes' is false.
+   * mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
    * 
* * bool include_attributes = 5; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Feature.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Feature.java index d290ffbe9..8c9c27f3b 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Feature.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Feature.java @@ -326,7 +326,7 @@ public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor return com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceProto .getDescriptor() .getEnumTypes() - .get(0); + .get(3); } private static final Feature[] VALUES = values(); diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelAnnotation.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelAnnotation.java index a6772796d..e1b8095bb 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelAnnotation.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelAnnotation.java @@ -226,9 +226,9 @@ public com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder getEntityOrB * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -242,9 +242,9 @@ public com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder getEntityOrB * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -258,9 +258,9 @@ public com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder getEntityOrB * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -273,9 +273,9 @@ public int getCategoryEntitiesCount() { * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -288,9 +288,9 @@ public com.google.cloud.videointelligence.v1p3beta1.Entity getCategoryEntities(i * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1140,9 +1140,9 @@ private void ensureCategoryEntitiesIsMutable() { * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1160,9 +1160,9 @@ private void ensureCategoryEntitiesIsMutable() { * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1179,9 +1179,9 @@ public int getCategoryEntitiesCount() { * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1198,9 +1198,9 @@ public com.google.cloud.videointelligence.v1p3beta1.Entity getCategoryEntities(i * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1224,9 +1224,9 @@ public Builder setCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1247,9 +1247,9 @@ public Builder setCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1272,9 +1272,9 @@ public Builder addCategoryEntities(com.google.cloud.videointelligence.v1p3beta1. * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1298,9 +1298,9 @@ public Builder addCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1321,9 +1321,9 @@ public Builder addCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1344,9 +1344,9 @@ public Builder addCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1367,9 +1367,9 @@ public Builder addAllCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category; e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1389,9 +1389,9 @@ public Builder clearCategoryEntities() { * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category; e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1411,9 +1411,9 @@ public Builder removeCategoryEntities(int index) { * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category; e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1427,9 +1427,9 @@ public com.google.cloud.videointelligence.v1p3beta1.Entity.Builder getCategoryEn * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category; e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1447,9 +1447,9 @@ public com.google.cloud.videointelligence.v1p3beta1.Entity.Builder getCategoryEn * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category; e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1467,9 +1467,9 @@ public com.google.cloud.videointelligence.v1p3beta1.Entity.Builder getCategoryEn * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category; e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1484,9 +1484,9 @@ public com.google.cloud.videointelligence.v1p3beta1.Entity.Builder getCategoryEn * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category; e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -1502,9 +1502,9 @@ public com.google.cloud.videointelligence.v1p3beta1.Entity.Builder addCategoryEn * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category; e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelAnnotationOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelAnnotationOrBuilder.java index 0e1caa3eb..508d78ef9 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelAnnotationOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelAnnotationOrBuilder.java @@ -63,9 +63,9 @@ public interface LabelAnnotationOrBuilder * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category; e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -76,9 +76,9 @@ public interface LabelAnnotationOrBuilder * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category; e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -89,9 +89,9 @@ public interface LabelAnnotationOrBuilder * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category; e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -102,9 +102,9 @@ public interface LabelAnnotationOrBuilder * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category; e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; @@ -116,9 +116,9 @@ public interface LabelAnnotationOrBuilder * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category; e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Entity category_entities = 2; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionConfig.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionConfig.java index e93ccfb3d..e30e7e7b3 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionConfig.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionConfig.java @@ -183,9 +183,9 @@ public com.google.cloud.videointelligence.v1p3beta1.LabelDetectionMode getLabelD * * *
-   * Whether the video has been shot from a stationary (i.e. non-moving) camera.
-   * When set to true, might improve detection accuracy for moving objects.
-   * Should be used with `SHOT_AND_FRAME_MODE` enabled.
+   * Whether the video has been shot from a stationary (i.e., non-moving)
+   * camera. When set to true, might improve detection accuracy for moving
+   * objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
    * 
* * bool stationary_camera = 2; @@ -257,7 +257,7 @@ public com.google.protobuf.ByteString getModelBytes() { * frame-level detection. If not set, it is set to 0.4 by default. The valid * range for this threshold is [0.1, 0.9]. Any value set outside of this * range will be clipped. - * Note: for best results please follow the default threshold. We will update + * Note: For best results, follow the default threshold. We will update * the default threshold everytime when we release a new model. * * @@ -276,10 +276,10 @@ public float getFrameConfidenceThreshold() { * *
    * The confidence threshold we perform filtering on the labels from
-   * video-level and shot-level detections. If not set, it is set to 0.3 by
+   * video-level and shot-level detections. If not set, it's set to 0.3 by
    * default. The valid range for this threshold is [0.1, 0.9]. Any value set
    * outside of this range will be clipped.
-   * Note: for best results please follow the default threshold. We will update
+   * Note: For best results, follow the default threshold. We will update
    * the default threshold everytime when we release a new model.
    * 
* @@ -796,9 +796,9 @@ public Builder clearLabelDetectionMode() { * * *
-     * Whether the video has been shot from a stationary (i.e. non-moving) camera.
-     * When set to true, might improve detection accuracy for moving objects.
-     * Should be used with `SHOT_AND_FRAME_MODE` enabled.
+     * Whether the video has been shot from a stationary (i.e., non-moving)
+     * camera. When set to true, might improve detection accuracy for moving
+     * objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
      * 
* * bool stationary_camera = 2; @@ -812,9 +812,9 @@ public boolean getStationaryCamera() { * * *
-     * Whether the video has been shot from a stationary (i.e. non-moving) camera.
-     * When set to true, might improve detection accuracy for moving objects.
-     * Should be used with `SHOT_AND_FRAME_MODE` enabled.
+     * Whether the video has been shot from a stationary (i.e., non-moving)
+     * camera. When set to true, might improve detection accuracy for moving
+     * objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
      * 
* * bool stationary_camera = 2; @@ -832,9 +832,9 @@ public Builder setStationaryCamera(boolean value) { * * *
-     * Whether the video has been shot from a stationary (i.e. non-moving) camera.
-     * When set to true, might improve detection accuracy for moving objects.
-     * Should be used with `SHOT_AND_FRAME_MODE` enabled.
+     * Whether the video has been shot from a stationary (i.e., non-moving)
+     * camera. When set to true, might improve detection accuracy for moving
+     * objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
      * 
* * bool stationary_camera = 2; @@ -973,7 +973,7 @@ public Builder setModelBytes(com.google.protobuf.ByteString value) { * frame-level detection. If not set, it is set to 0.4 by default. The valid * range for this threshold is [0.1, 0.9]. Any value set outside of this * range will be clipped. - * Note: for best results please follow the default threshold. We will update + * Note: For best results, follow the default threshold. We will update * the default threshold everytime when we release a new model. * * @@ -992,7 +992,7 @@ public float getFrameConfidenceThreshold() { * frame-level detection. If not set, it is set to 0.4 by default. The valid * range for this threshold is [0.1, 0.9]. Any value set outside of this * range will be clipped. - * Note: for best results please follow the default threshold. We will update + * Note: For best results, follow the default threshold. We will update * the default threshold everytime when we release a new model. * * @@ -1015,7 +1015,7 @@ public Builder setFrameConfidenceThreshold(float value) { * frame-level detection. If not set, it is set to 0.4 by default. The valid * range for this threshold is [0.1, 0.9]. Any value set outside of this * range will be clipped. - * Note: for best results please follow the default threshold. We will update + * Note: For best results, follow the default threshold. We will update * the default threshold everytime when we release a new model. * * @@ -1036,10 +1036,10 @@ public Builder clearFrameConfidenceThreshold() { * *
      * The confidence threshold we perform filtering on the labels from
-     * video-level and shot-level detections. If not set, it is set to 0.3 by
+     * video-level and shot-level detections. If not set, it's set to 0.3 by
      * default. The valid range for this threshold is [0.1, 0.9]. Any value set
      * outside of this range will be clipped.
-     * Note: for best results please follow the default threshold. We will update
+     * Note: For best results, follow the default threshold. We will update
      * the default threshold everytime when we release a new model.
      * 
* @@ -1055,10 +1055,10 @@ public float getVideoConfidenceThreshold() { * *
      * The confidence threshold we perform filtering on the labels from
-     * video-level and shot-level detections. If not set, it is set to 0.3 by
+     * video-level and shot-level detections. If not set, it's set to 0.3 by
      * default. The valid range for this threshold is [0.1, 0.9]. Any value set
      * outside of this range will be clipped.
-     * Note: for best results please follow the default threshold. We will update
+     * Note: For best results, follow the default threshold. We will update
      * the default threshold everytime when we release a new model.
      * 
* @@ -1078,10 +1078,10 @@ public Builder setVideoConfidenceThreshold(float value) { * *
      * The confidence threshold we perform filtering on the labels from
-     * video-level and shot-level detections. If not set, it is set to 0.3 by
+     * video-level and shot-level detections. If not set, it's set to 0.3 by
      * default. The valid range for this threshold is [0.1, 0.9]. Any value set
      * outside of this range will be clipped.
-     * Note: for best results please follow the default threshold. We will update
+     * Note: For best results, follow the default threshold. We will update
      * the default threshold everytime when we release a new model.
      * 
* diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionConfigOrBuilder.java index ddc3d8e80..fdfee3564 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionConfigOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionConfigOrBuilder.java @@ -58,9 +58,9 @@ public interface LabelDetectionConfigOrBuilder * * *
-   * Whether the video has been shot from a stationary (i.e. non-moving) camera.
-   * When set to true, might improve detection accuracy for moving objects.
-   * Should be used with `SHOT_AND_FRAME_MODE` enabled.
+   * Whether the video has been shot from a stationary (i.e., non-moving)
+   * camera. When set to true, might improve detection accuracy for moving
+   * objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
    * 
* * bool stationary_camera = 2; @@ -106,7 +106,7 @@ public interface LabelDetectionConfigOrBuilder * frame-level detection. If not set, it is set to 0.4 by default. The valid * range for this threshold is [0.1, 0.9]. Any value set outside of this * range will be clipped. - * Note: for best results please follow the default threshold. We will update + * Note: For best results, follow the default threshold. We will update * the default threshold everytime when we release a new model. * * @@ -121,10 +121,10 @@ public interface LabelDetectionConfigOrBuilder * *
    * The confidence threshold we perform filtering on the labels from
-   * video-level and shot-level detections. If not set, it is set to 0.3 by
+   * video-level and shot-level detections. If not set, it's set to 0.3 by
    * default. The valid range for this threshold is [0.1, 0.9]. Any value set
    * outside of this range will be clipped.
-   * Note: for best results please follow the default threshold. We will update
+   * Note: For best results, follow the default threshold. We will update
    * the default threshold everytime when we release a new model.
    * 
* diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionMode.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionMode.java index 5947486b8..1698482fa 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionMode.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/LabelDetectionMode.java @@ -173,7 +173,7 @@ public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor return com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceProto .getDescriptor() .getEnumTypes() - .get(1); + .get(0); } private static final LabelDetectionMode[] VALUES = values(); diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Likelihood.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Likelihood.java index 3d032f0d1..425478161 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Likelihood.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/Likelihood.java @@ -216,7 +216,7 @@ public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor return com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceProto .getDescriptor() .getEnumTypes() - .get(2); + .get(1); } private static final Likelihood[] VALUES = values(); diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/ObjectTrackingAnnotation.java 
b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/ObjectTrackingAnnotation.java index 489224f5e..81c59413f 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/ObjectTrackingAnnotation.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/ObjectTrackingAnnotation.java @@ -217,6 +217,81 @@ public TrackInfoCase getTrackInfoCase() { return TrackInfoCase.forNumber(trackInfoCase_); } + public static final int SEGMENT_FIELD_NUMBER = 3; + /** + * + * + *
+   * Non-streaming batch mode ONLY.
+   * Each object track corresponds to one video segment where it appears.
+   * 
+ * + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + * + * @return Whether the segment field is set. + */ + public boolean hasSegment() { + return trackInfoCase_ == 3; + } + /** + * + * + *
+   * Non-streaming batch mode ONLY.
+   * Each object track corresponds to one video segment where it appears.
+   * 
+ * + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + * + * @return The segment. + */ + public com.google.cloud.videointelligence.v1p3beta1.VideoSegment getSegment() { + if (trackInfoCase_ == 3) { + return (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_; + } + return com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); + } + /** + * + * + *
+   * Non-streaming batch mode ONLY.
+   * Each object track corresponds to one video segment where it appears.
+   * 
+ * + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + */ + public com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder getSegmentOrBuilder() { + if (trackInfoCase_ == 3) { + return (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_; + } + return com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); + } + + public static final int TRACK_ID_FIELD_NUMBER = 5; + /** + * + * + *
+   * Streaming mode ONLY.
+   * In streaming mode, we do not know the end time of a tracked object
+   * before it is completed. Hence, there is no VideoSegment info returned.
+   * Instead, we provide a unique identifiable integer track_id so that
+   * the customers can correlate the results of the ongoing
+   * ObjectTrackAnnotation of the same track_id over time.
+   * 
+ * + * int64 track_id = 5; + * + * @return The trackId. + */ + public long getTrackId() { + if (trackInfoCase_ == 5) { + return (java.lang.Long) trackInfo_; + } + return 0L; + } + public static final int ENTITY_FIELD_NUMBER = 1; private com.google.cloud.videointelligence.v1p3beta1.Entity entity_; /** @@ -361,81 +436,6 @@ public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame getFrame return frames_.get(index); } - public static final int SEGMENT_FIELD_NUMBER = 3; - /** - * - * - *
-   * Non-streaming batch mode ONLY.
-   * Each object track corresponds to one video segment where it appears.
-   * 
- * - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; - * - * @return Whether the segment field is set. - */ - public boolean hasSegment() { - return trackInfoCase_ == 3; - } - /** - * - * - *
-   * Non-streaming batch mode ONLY.
-   * Each object track corresponds to one video segment where it appears.
-   * 
- * - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; - * - * @return The segment. - */ - public com.google.cloud.videointelligence.v1p3beta1.VideoSegment getSegment() { - if (trackInfoCase_ == 3) { - return (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_; - } - return com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); - } - /** - * - * - *
-   * Non-streaming batch mode ONLY.
-   * Each object track corresponds to one video segment where it appears.
-   * 
- * - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; - */ - public com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder getSegmentOrBuilder() { - if (trackInfoCase_ == 3) { - return (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_; - } - return com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); - } - - public static final int TRACK_ID_FIELD_NUMBER = 5; - /** - * - * - *
-   * Streaming mode ONLY.
-   * In streaming mode, we do not know the end time of a tracked object
-   * before it is completed. Hence, there is no VideoSegment info returned.
-   * Instead, we provide a unique identifiable integer track_id so that
-   * the customers can correlate the results of the ongoing
-   * ObjectTrackAnnotation of the same track_id over time.
-   * 
- * - * int64 track_id = 5; - * - * @return The trackId. - */ - public long getTrackId() { - if (trackInfoCase_ == 5) { - return (java.lang.Long) trackInfo_; - } - return 0L; - } - private byte memoizedIsInitialized = -1; @java.lang.Override @@ -757,6 +757,16 @@ public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation bui com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation result = new com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation(this); int from_bitField0_ = bitField0_; + if (trackInfoCase_ == 3) { + if (segmentBuilder_ == null) { + result.trackInfo_ = trackInfo_; + } else { + result.trackInfo_ = segmentBuilder_.build(); + } + } + if (trackInfoCase_ == 5) { + result.trackInfo_ = trackInfo_; + } if (entityBuilder_ == null) { result.entity_ = entity_; } else { @@ -772,16 +782,6 @@ public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation bui } else { result.frames_ = framesBuilder_.build(); } - if (trackInfoCase_ == 3) { - if (segmentBuilder_ == null) { - result.trackInfo_ = trackInfo_; - } else { - result.trackInfo_ = segmentBuilder_.build(); - } - } - if (trackInfoCase_ == 5) { - result.trackInfo_ = trackInfo_; - } result.trackInfoCase_ = trackInfoCase_; onBuilt(); return result; @@ -932,222 +932,266 @@ public Builder clearTrackInfo() { private int bitField0_; - private com.google.cloud.videointelligence.v1p3beta1.Entity entity_; private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.videointelligence.v1p3beta1.Entity, - com.google.cloud.videointelligence.v1p3beta1.Entity.Builder, - com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder> - entityBuilder_; + com.google.cloud.videointelligence.v1p3beta1.VideoSegment, + com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder, + com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder> + segmentBuilder_; /** * * *
-     * Entity to specify the object category that this track is labeled as.
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; * - * @return Whether the entity field is set. + * @return Whether the segment field is set. */ - public boolean hasEntity() { - return entityBuilder_ != null || entity_ != null; + public boolean hasSegment() { + return trackInfoCase_ == 3; } /** * * *
-     * Entity to specify the object category that this track is labeled as.
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; * - * @return The entity. + * @return The segment. */ - public com.google.cloud.videointelligence.v1p3beta1.Entity getEntity() { - if (entityBuilder_ == null) { - return entity_ == null - ? com.google.cloud.videointelligence.v1p3beta1.Entity.getDefaultInstance() - : entity_; + public com.google.cloud.videointelligence.v1p3beta1.VideoSegment getSegment() { + if (segmentBuilder_ == null) { + if (trackInfoCase_ == 3) { + return (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_; + } + return com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); } else { - return entityBuilder_.getMessage(); + if (trackInfoCase_ == 3) { + return segmentBuilder_.getMessage(); + } + return com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); } } /** * * *
-     * Entity to specify the object category that this track is labeled as.
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; */ - public Builder setEntity(com.google.cloud.videointelligence.v1p3beta1.Entity value) { - if (entityBuilder_ == null) { + public Builder setSegment(com.google.cloud.videointelligence.v1p3beta1.VideoSegment value) { + if (segmentBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - entity_ = value; + trackInfo_ = value; onChanged(); } else { - entityBuilder_.setMessage(value); + segmentBuilder_.setMessage(value); } - + trackInfoCase_ = 3; return this; } /** * * *
-     * Entity to specify the object category that this track is labeled as.
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; */ - public Builder setEntity( - com.google.cloud.videointelligence.v1p3beta1.Entity.Builder builderForValue) { - if (entityBuilder_ == null) { - entity_ = builderForValue.build(); + public Builder setSegment( + com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder builderForValue) { + if (segmentBuilder_ == null) { + trackInfo_ = builderForValue.build(); onChanged(); } else { - entityBuilder_.setMessage(builderForValue.build()); + segmentBuilder_.setMessage(builderForValue.build()); } - + trackInfoCase_ = 3; return this; } /** * * *
-     * Entity to specify the object category that this track is labeled as.
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; */ - public Builder mergeEntity(com.google.cloud.videointelligence.v1p3beta1.Entity value) { - if (entityBuilder_ == null) { - if (entity_ != null) { - entity_ = - com.google.cloud.videointelligence.v1p3beta1.Entity.newBuilder(entity_) - .mergeFrom(value) + public Builder mergeSegment(com.google.cloud.videointelligence.v1p3beta1.VideoSegment value) { + if (segmentBuilder_ == null) { + if (trackInfoCase_ == 3 + && trackInfo_ + != com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance()) { + trackInfo_ = + com.google.cloud.videointelligence.v1p3beta1.VideoSegment.newBuilder( + (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_) + .mergeFrom(value) .buildPartial(); } else { - entity_ = value; + trackInfo_ = value; } onChanged(); } else { - entityBuilder_.mergeFrom(value); + if (trackInfoCase_ == 3) { + segmentBuilder_.mergeFrom(value); + } + segmentBuilder_.setMessage(value); } - + trackInfoCase_ = 3; return this; } /** * * *
-     * Entity to specify the object category that this track is labeled as.
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; */ - public Builder clearEntity() { - if (entityBuilder_ == null) { - entity_ = null; - onChanged(); + public Builder clearSegment() { + if (segmentBuilder_ == null) { + if (trackInfoCase_ == 3) { + trackInfoCase_ = 0; + trackInfo_ = null; + onChanged(); + } } else { - entity_ = null; - entityBuilder_ = null; + if (trackInfoCase_ == 3) { + trackInfoCase_ = 0; + trackInfo_ = null; + } + segmentBuilder_.clear(); } - return this; } /** * * *
-     * Entity to specify the object category that this track is labeled as.
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; */ - public com.google.cloud.videointelligence.v1p3beta1.Entity.Builder getEntityBuilder() { - - onChanged(); - return getEntityFieldBuilder().getBuilder(); + public com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder getSegmentBuilder() { + return getSegmentFieldBuilder().getBuilder(); } /** * * *
-     * Entity to specify the object category that this track is labeled as.
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; */ - public com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder getEntityOrBuilder() { - if (entityBuilder_ != null) { - return entityBuilder_.getMessageOrBuilder(); + public com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder + getSegmentOrBuilder() { + if ((trackInfoCase_ == 3) && (segmentBuilder_ != null)) { + return segmentBuilder_.getMessageOrBuilder(); } else { - return entity_ == null - ? com.google.cloud.videointelligence.v1p3beta1.Entity.getDefaultInstance() - : entity_; + if (trackInfoCase_ == 3) { + return (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_; + } + return com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); } } /** * * *
-     * Entity to specify the object category that this track is labeled as.
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; */ private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.videointelligence.v1p3beta1.Entity, - com.google.cloud.videointelligence.v1p3beta1.Entity.Builder, - com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder> - getEntityFieldBuilder() { - if (entityBuilder_ == null) { - entityBuilder_ = + com.google.cloud.videointelligence.v1p3beta1.VideoSegment, + com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder, + com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder> + getSegmentFieldBuilder() { + if (segmentBuilder_ == null) { + if (!(trackInfoCase_ == 3)) { + trackInfo_ = + com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); + } + segmentBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.videointelligence.v1p3beta1.Entity, - com.google.cloud.videointelligence.v1p3beta1.Entity.Builder, - com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder>( - getEntity(), getParentForChildren(), isClean()); - entity_ = null; + com.google.cloud.videointelligence.v1p3beta1.VideoSegment, + com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder, + com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder>( + (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_, + getParentForChildren(), + isClean()); + trackInfo_ = null; } - return entityBuilder_; + trackInfoCase_ = 3; + onChanged(); + ; + return segmentBuilder_; } - private float confidence_; /** * * *
-     * Object category's labeling confidence of this track.
+     * Streaming mode ONLY.
+     * In streaming mode, we do not know the end time of a tracked object
+     * before it is completed. Hence, there is no VideoSegment info returned.
+     * Instead, we provide a unique identifiable integer track_id so that
+     * the customers can correlate the results of the ongoing
+     * ObjectTrackAnnotation of the same track_id over time.
      * 
* - * float confidence = 4; + * int64 track_id = 5; * - * @return The confidence. + * @return The trackId. */ - public float getConfidence() { - return confidence_; + public long getTrackId() { + if (trackInfoCase_ == 5) { + return (java.lang.Long) trackInfo_; + } + return 0L; } /** * * *
-     * Object category's labeling confidence of this track.
+     * Streaming mode ONLY.
+     * In streaming mode, we do not know the end time of a tracked object
+     * before it is completed. Hence, there is no VideoSegment info returned.
+     * Instead, we provide a unique identifiable integer track_id so that
+     * the customers can correlate the results of the ongoing
+     * ObjectTrackAnnotation of the same track_id over time.
      * 
* - * float confidence = 4; + * int64 track_id = 5; * - * @param value The confidence to set. + * @param value The trackId to set. * @return This builder for chaining. */ - public Builder setConfidence(float value) { - - confidence_ = value; + public Builder setTrackId(long value) { + trackInfoCase_ = 5; + trackInfo_ = value; onChanged(); return this; } @@ -1155,205 +1199,282 @@ public Builder setConfidence(float value) { * * *
-     * Object category's labeling confidence of this track.
+     * Streaming mode ONLY.
+     * In streaming mode, we do not know the end time of a tracked object
+     * before it is completed. Hence, there is no VideoSegment info returned.
+     * Instead, we provide a unique identifiable integer track_id so that
+     * the customers can correlate the results of the ongoing
+     * ObjectTrackAnnotation of the same track_id over time.
      * 
* - * float confidence = 4; + * int64 track_id = 5; * * @return This builder for chaining. */ - public Builder clearConfidence() { - - confidence_ = 0F; - onChanged(); - return this; - } - - private java.util.List - frames_ = java.util.Collections.emptyList(); - - private void ensureFramesIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { - frames_ = - new java.util.ArrayList< - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame>(frames_); - bitField0_ |= 0x00000001; + public Builder clearTrackId() { + if (trackInfoCase_ == 5) { + trackInfoCase_ = 0; + trackInfo_ = null; + onChanged(); } + return this; } - private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame, - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder, - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder> - framesBuilder_; - + private com.google.cloud.videointelligence.v1p3beta1.Entity entity_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1p3beta1.Entity, + com.google.cloud.videointelligence.v1p3beta1.Entity.Builder, + com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder> + entityBuilder_; /** * * *
-     * Information corresponding to all frames where this object track appears.
-     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
-     * messages in frames.
-     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+     * Entity to specify the object category that this track is labeled as.
      * 
* - * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; - * + * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * + * @return Whether the entity field is set. */ - public java.util.List - getFramesList() { - if (framesBuilder_ == null) { - return java.util.Collections.unmodifiableList(frames_); - } else { - return framesBuilder_.getMessageList(); - } + public boolean hasEntity() { + return entityBuilder_ != null || entity_ != null; } /** * * *
-     * Information corresponding to all frames where this object track appears.
-     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
-     * messages in frames.
-     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+     * Entity to specify the object category that this track is labeled as.
      * 
* - * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; - * + * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + * + * @return The entity. */ - public int getFramesCount() { - if (framesBuilder_ == null) { - return frames_.size(); + public com.google.cloud.videointelligence.v1p3beta1.Entity getEntity() { + if (entityBuilder_ == null) { + return entity_ == null + ? com.google.cloud.videointelligence.v1p3beta1.Entity.getDefaultInstance() + : entity_; } else { - return framesBuilder_.getCount(); + return entityBuilder_.getMessage(); } } /** * * *
-     * Information corresponding to all frames where this object track appears.
-     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
-     * messages in frames.
-     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+     * Entity to specify the object category that this track is labeled as.
      * 
* - * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; - * + * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; */ - public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame getFrames(int index) { - if (framesBuilder_ == null) { - return frames_.get(index); + public Builder setEntity(com.google.cloud.videointelligence.v1p3beta1.Entity value) { + if (entityBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + entity_ = value; + onChanged(); } else { - return framesBuilder_.getMessage(index); + entityBuilder_.setMessage(value); } + + return this; } /** * * *
-     * Information corresponding to all frames where this object track appears.
-     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
-     * messages in frames.
-     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+     * Entity to specify the object category that this track is labeled as.
      * 
* - * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; - * + * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; */ - public Builder setFrames( - int index, com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame value) { - if (framesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFramesIsMutable(); - frames_.set(index, value); + public Builder setEntity( + com.google.cloud.videointelligence.v1p3beta1.Entity.Builder builderForValue) { + if (entityBuilder_ == null) { + entity_ = builderForValue.build(); onChanged(); } else { - framesBuilder_.setMessage(index, value); + entityBuilder_.setMessage(builderForValue.build()); } + return this; } /** * * *
-     * Information corresponding to all frames where this object track appears.
-     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
-     * messages in frames.
-     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+     * Entity to specify the object category that this track is labeled as.
      * 
* - * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; - * + * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; */ - public Builder setFrames( - int index, - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder builderForValue) { - if (framesBuilder_ == null) { - ensureFramesIsMutable(); - frames_.set(index, builderForValue.build()); + public Builder mergeEntity(com.google.cloud.videointelligence.v1p3beta1.Entity value) { + if (entityBuilder_ == null) { + if (entity_ != null) { + entity_ = + com.google.cloud.videointelligence.v1p3beta1.Entity.newBuilder(entity_) + .mergeFrom(value) + .buildPartial(); + } else { + entity_ = value; + } onChanged(); } else { - framesBuilder_.setMessage(index, builderForValue.build()); + entityBuilder_.mergeFrom(value); } + return this; } /** * * *
-     * Information corresponding to all frames where this object track appears.
-     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
-     * messages in frames.
-     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+     * Entity to specify the object category that this track is labeled as.
      * 
* - * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; - * + * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; */ - public Builder addFrames( - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame value) { - if (framesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFramesIsMutable(); - frames_.add(value); + public Builder clearEntity() { + if (entityBuilder_ == null) { + entity_ = null; onChanged(); } else { - framesBuilder_.addMessage(value); + entity_ = null; + entityBuilder_ = null; } + return this; } /** * * *
-     * Information corresponding to all frames where this object track appears.
-     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
-     * messages in frames.
-     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+     * Entity to specify the object category that this track is labeled as.
      * 
* - * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; - * + * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; */ - public Builder addFrames( - int index, com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame value) { - if (framesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFramesIsMutable(); - frames_.add(index, value); - onChanged(); + public com.google.cloud.videointelligence.v1p3beta1.Entity.Builder getEntityBuilder() { + + onChanged(); + return getEntityFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Entity to specify the object category that this track is labeled as.
+     * 
+ * + * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + */ + public com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder getEntityOrBuilder() { + if (entityBuilder_ != null) { + return entityBuilder_.getMessageOrBuilder(); } else { - framesBuilder_.addMessage(index, value); + return entity_ == null + ? com.google.cloud.videointelligence.v1p3beta1.Entity.getDefaultInstance() + : entity_; + } + } + /** + * + * + *
+     * Entity to specify the object category that this track is labeled as.
+     * 
+ * + * .google.cloud.videointelligence.v1p3beta1.Entity entity = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1p3beta1.Entity, + com.google.cloud.videointelligence.v1p3beta1.Entity.Builder, + com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder> + getEntityFieldBuilder() { + if (entityBuilder_ == null) { + entityBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1p3beta1.Entity, + com.google.cloud.videointelligence.v1p3beta1.Entity.Builder, + com.google.cloud.videointelligence.v1p3beta1.EntityOrBuilder>( + getEntity(), getParentForChildren(), isClean()); + entity_ = null; } + return entityBuilder_; + } + + private float confidence_; + /** + * + * + *
+     * Object category's labeling confidence of this track.
+     * 
+ * + * float confidence = 4; + * + * @return The confidence. + */ + public float getConfidence() { + return confidence_; + } + /** + * + * + *
+     * Object category's labeling confidence of this track.
+     * 
+ * + * float confidence = 4; + * + * @param value The confidence to set. + * @return This builder for chaining. + */ + public Builder setConfidence(float value) { + + confidence_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Object category's labeling confidence of this track.
+     * 
+ * + * float confidence = 4; + * + * @return This builder for chaining. + */ + public Builder clearConfidence() { + + confidence_ = 0F; + onChanged(); return this; } + + private java.util.List + frames_ = java.util.Collections.emptyList(); + + private void ensureFramesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + frames_ = + new java.util.ArrayList< + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame>(frames_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame, + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder, + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder> + framesBuilder_; + /** * * @@ -1367,16 +1488,13 @@ public Builder addFrames( * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public Builder addFrames( - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder builderForValue) { + public java.util.List + getFramesList() { if (framesBuilder_ == null) { - ensureFramesIsMutable(); - frames_.add(builderForValue.build()); - onChanged(); + return java.util.Collections.unmodifiableList(frames_); } else { - framesBuilder_.addMessage(builderForValue.build()); + return framesBuilder_.getMessageList(); } - return this; } /** * @@ -1391,17 +1509,12 @@ public Builder addFrames( * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public Builder addFrames( - int index, - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder builderForValue) { + public int getFramesCount() { if (framesBuilder_ == null) { - ensureFramesIsMutable(); - frames_.add(index, builderForValue.build()); - onChanged(); + return frames_.size(); } else { - framesBuilder_.addMessage(index, builderForValue.build()); + return framesBuilder_.getCount(); } - return this; } /** * @@ -1416,18 +1529,12 @@ 
public Builder addFrames( * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public Builder addAllFrames( - java.lang.Iterable< - ? extends com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame> - values) { + public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame getFrames(int index) { if (framesBuilder_ == null) { - ensureFramesIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, frames_); - onChanged(); + return frames_.get(index); } else { - framesBuilder_.addAllMessages(values); + return framesBuilder_.getMessage(index); } - return this; } /** * @@ -1442,13 +1549,17 @@ public Builder addAllFrames( * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public Builder clearFrames() { + public Builder setFrames( + int index, com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame value) { if (framesBuilder_ == null) { - frames_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); + if (value == null) { + throw new NullPointerException(); + } + ensureFramesIsMutable(); + frames_.set(index, value); onChanged(); } else { - framesBuilder_.clear(); + framesBuilder_.setMessage(index, value); } return this; } @@ -1465,13 +1576,15 @@ public Builder clearFrames() { * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public Builder removeFrames(int index) { + public Builder setFrames( + int index, + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder builderForValue) { if (framesBuilder_ == null) { ensureFramesIsMutable(); - frames_.remove(index); + frames_.set(index, builderForValue.build()); onChanged(); } else { - framesBuilder_.remove(index); + framesBuilder_.setMessage(index, builderForValue.build()); } return this; } @@ -1488,9 +1601,19 @@ public Builder removeFrames(int index) { * repeated 
.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder - getFramesBuilder(int index) { - return getFramesFieldBuilder().getBuilder(index); + public Builder addFrames( + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame value) { + if (framesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFramesIsMutable(); + frames_.add(value); + onChanged(); + } else { + framesBuilder_.addMessage(value); + } + return this; } /** * @@ -1505,13 +1628,19 @@ public Builder removeFrames(int index) { * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder - getFramesOrBuilder(int index) { + public Builder addFrames( + int index, com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame value) { if (framesBuilder_ == null) { - return frames_.get(index); + if (value == null) { + throw new NullPointerException(); + } + ensureFramesIsMutable(); + frames_.add(index, value); + onChanged(); } else { - return framesBuilder_.getMessageOrBuilder(index); + framesBuilder_.addMessage(index, value); } + return this; } /** * @@ -1526,14 +1655,16 @@ public Builder removeFrames(int index) { * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public java.util.List< - ? 
extends com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder> - getFramesOrBuilderList() { - if (framesBuilder_ != null) { - return framesBuilder_.getMessageOrBuilderList(); + public Builder addFrames( + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder builderForValue) { + if (framesBuilder_ == null) { + ensureFramesIsMutable(); + frames_.add(builderForValue.build()); + onChanged(); } else { - return java.util.Collections.unmodifiableList(frames_); + framesBuilder_.addMessage(builderForValue.build()); } + return this; } /** * @@ -1548,12 +1679,17 @@ public Builder removeFrames(int index) { * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder - addFramesBuilder() { - return getFramesFieldBuilder() - .addBuilder( - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame - .getDefaultInstance()); + public Builder addFrames( + int index, + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder builderForValue) { + if (framesBuilder_ == null) { + ensureFramesIsMutable(); + frames_.add(index, builderForValue.build()); + onChanged(); + } else { + framesBuilder_.addMessage(index, builderForValue.build()); + } + return this; } /** * @@ -1568,13 +1704,18 @@ public Builder removeFrames(int index) { * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder - addFramesBuilder(int index) { - return getFramesFieldBuilder() - .addBuilder( - index, - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame - .getDefaultInstance()); + public Builder addAllFrames( + java.lang.Iterable< + ? 
extends com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame> + values) { + if (framesBuilder_ == null) { + ensureFramesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, frames_); + onChanged(); + } else { + framesBuilder_.addAllMessages(values); + } + return this; } /** * @@ -1589,314 +1730,173 @@ public Builder removeFrames(int index) { * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; * */ - public java.util.List - getFramesBuilderList() { - return getFramesFieldBuilder().getBuilderList(); - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame, - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder, - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder> - getFramesFieldBuilder() { + public Builder clearFrames() { if (framesBuilder_ == null) { - framesBuilder_ = - new com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame, - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder, - com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder>( - frames_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); - frames_ = null; - } - return framesBuilder_; - } - - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.videointelligence.v1p3beta1.VideoSegment, - com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder, - com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder> - segmentBuilder_; - /** - * - * - *
-     * Non-streaming batch mode ONLY.
-     * Each object track corresponds to one video segment where it appears.
-     * 
- * - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; - * - * @return Whether the segment field is set. - */ - public boolean hasSegment() { - return trackInfoCase_ == 3; - } - /** - * - * - *
-     * Non-streaming batch mode ONLY.
-     * Each object track corresponds to one video segment where it appears.
-     * 
- * - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; - * - * @return The segment. - */ - public com.google.cloud.videointelligence.v1p3beta1.VideoSegment getSegment() { - if (segmentBuilder_ == null) { - if (trackInfoCase_ == 3) { - return (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_; - } - return com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); - } else { - if (trackInfoCase_ == 3) { - return segmentBuilder_.getMessage(); - } - return com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); - } - } - /** - * - * - *
-     * Non-streaming batch mode ONLY.
-     * Each object track corresponds to one video segment where it appears.
-     * 
- * - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; - */ - public Builder setSegment(com.google.cloud.videointelligence.v1p3beta1.VideoSegment value) { - if (segmentBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - trackInfo_ = value; + frames_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { - segmentBuilder_.setMessage(value); + framesBuilder_.clear(); } - trackInfoCase_ = 3; return this; } /** * * *
-     * Non-streaming batch mode ONLY.
-     * Each object track corresponds to one video segment where it appears.
+     * Information corresponding to all frames where this object track appears.
+     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+     * messages in frames.
+     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; + * */ - public Builder setSegment( - com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder builderForValue) { - if (segmentBuilder_ == null) { - trackInfo_ = builderForValue.build(); + public Builder removeFrames(int index) { + if (framesBuilder_ == null) { + ensureFramesIsMutable(); + frames_.remove(index); onChanged(); } else { - segmentBuilder_.setMessage(builderForValue.build()); + framesBuilder_.remove(index); } - trackInfoCase_ = 3; return this; } /** * * *
-     * Non-streaming batch mode ONLY.
-     * Each object track corresponds to one video segment where it appears.
+     * Information corresponding to all frames where this object track appears.
+     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+     * messages in frames.
+     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; + * */ - public Builder mergeSegment(com.google.cloud.videointelligence.v1p3beta1.VideoSegment value) { - if (segmentBuilder_ == null) { - if (trackInfoCase_ == 3 - && trackInfo_ - != com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance()) { - trackInfo_ = - com.google.cloud.videointelligence.v1p3beta1.VideoSegment.newBuilder( - (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_) - .mergeFrom(value) - .buildPartial(); - } else { - trackInfo_ = value; - } - onChanged(); - } else { - if (trackInfoCase_ == 3) { - segmentBuilder_.mergeFrom(value); - } - segmentBuilder_.setMessage(value); - } - trackInfoCase_ = 3; - return this; + public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder + getFramesBuilder(int index) { + return getFramesFieldBuilder().getBuilder(index); } /** * * *
-     * Non-streaming batch mode ONLY.
-     * Each object track corresponds to one video segment where it appears.
+     * Information corresponding to all frames where this object track appears.
+     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+     * messages in frames.
+     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; + * */ - public Builder clearSegment() { - if (segmentBuilder_ == null) { - if (trackInfoCase_ == 3) { - trackInfoCase_ = 0; - trackInfo_ = null; - onChanged(); - } + public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder + getFramesOrBuilder(int index) { + if (framesBuilder_ == null) { + return frames_.get(index); } else { - if (trackInfoCase_ == 3) { - trackInfoCase_ = 0; - trackInfo_ = null; - } - segmentBuilder_.clear(); + return framesBuilder_.getMessageOrBuilder(index); } - return this; } /** * * *
-     * Non-streaming batch mode ONLY.
-     * Each object track corresponds to one video segment where it appears.
-     * 
- * - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; - */ - public com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder getSegmentBuilder() { - return getSegmentFieldBuilder().getBuilder(); - } - /** - * - * - *
-     * Non-streaming batch mode ONLY.
-     * Each object track corresponds to one video segment where it appears.
+     * Information corresponding to all frames where this object track appears.
+     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+     * messages in frames.
+     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; + * */ - public com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder - getSegmentOrBuilder() { - if ((trackInfoCase_ == 3) && (segmentBuilder_ != null)) { - return segmentBuilder_.getMessageOrBuilder(); + public java.util.List< + ? extends com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder> + getFramesOrBuilderList() { + if (framesBuilder_ != null) { + return framesBuilder_.getMessageOrBuilderList(); } else { - if (trackInfoCase_ == 3) { - return (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_; - } - return com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); + return java.util.Collections.unmodifiableList(frames_); } } /** * * *
-     * Non-streaming batch mode ONLY.
-     * Each object track corresponds to one video segment where it appears.
+     * Information corresponding to all frames where this object track appears.
+     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+     * messages in frames.
+     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
      * 
* - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; + * */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.videointelligence.v1p3beta1.VideoSegment, - com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder, - com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder> - getSegmentFieldBuilder() { - if (segmentBuilder_ == null) { - if (!(trackInfoCase_ == 3)) { - trackInfo_ = - com.google.cloud.videointelligence.v1p3beta1.VideoSegment.getDefaultInstance(); - } - segmentBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.videointelligence.v1p3beta1.VideoSegment, - com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder, - com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder>( - (com.google.cloud.videointelligence.v1p3beta1.VideoSegment) trackInfo_, - getParentForChildren(), - isClean()); - trackInfo_ = null; - } - trackInfoCase_ = 3; - onChanged(); - ; - return segmentBuilder_; + public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder + addFramesBuilder() { + return getFramesFieldBuilder() + .addBuilder( + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame + .getDefaultInstance()); } - /** * * *
-     * Streaming mode ONLY.
-     * In streaming mode, we do not know the end time of a tracked object
-     * before it is completed. Hence, there is no VideoSegment info returned.
-     * Instead, we provide a unique identifiable integer track_id so that
-     * the customers can correlate the results of the ongoing
-     * ObjectTrackAnnotation of the same track_id over time.
+     * Information corresponding to all frames where this object track appears.
+     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+     * messages in frames.
+     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
      * 
* - * int64 track_id = 5; - * - * @return The trackId. + * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; + * */ - public long getTrackId() { - if (trackInfoCase_ == 5) { - return (java.lang.Long) trackInfo_; - } - return 0L; + public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder + addFramesBuilder(int index) { + return getFramesFieldBuilder() + .addBuilder( + index, + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame + .getDefaultInstance()); } /** * * *
-     * Streaming mode ONLY.
-     * In streaming mode, we do not know the end time of a tracked object
-     * before it is completed. Hence, there is no VideoSegment info returned.
-     * Instead, we provide a unique identifiable integer track_id so that
-     * the customers can correlate the results of the ongoing
-     * ObjectTrackAnnotation of the same track_id over time.
+     * Information corresponding to all frames where this object track appears.
+     * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+     * messages in frames.
+     * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
      * 
* - * int64 track_id = 5; - * - * @param value The trackId to set. - * @return This builder for chaining. + * repeated .google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame frames = 2; + * */ - public Builder setTrackId(long value) { - trackInfoCase_ = 5; - trackInfo_ = value; - onChanged(); - return this; + public java.util.List + getFramesBuilderList() { + return getFramesFieldBuilder().getBuilderList(); } - /** - * - * - *
-     * Streaming mode ONLY.
-     * In streaming mode, we do not know the end time of a tracked object
-     * before it is completed. Hence, there is no VideoSegment info returned.
-     * Instead, we provide a unique identifiable integer track_id so that
-     * the customers can correlate the results of the ongoing
-     * ObjectTrackAnnotation of the same track_id over time.
-     * 
- * - * int64 track_id = 5; - * - * @return This builder for chaining. - */ - public Builder clearTrackId() { - if (trackInfoCase_ == 5) { - trackInfoCase_ = 0; - trackInfo_ = null; - onChanged(); + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame, + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder, + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder> + getFramesFieldBuilder() { + if (framesBuilder_ == null) { + framesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame, + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame.Builder, + com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder>( + frames_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + frames_ = null; } - return this; + return framesBuilder_; } @java.lang.Override diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/ObjectTrackingAnnotationOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/ObjectTrackingAnnotationOrBuilder.java index fa8adf3a9..5408961d0 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/ObjectTrackingAnnotationOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/ObjectTrackingAnnotationOrBuilder.java @@ -23,6 +23,62 @@ public interface ObjectTrackingAnnotationOrBuilder // @@protoc_insertion_point(interface_extends:google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation) com.google.protobuf.MessageOrBuilder { + /** + * + * + *
+   * Non-streaming batch mode ONLY.
+   * Each object track corresponds to one video segment where it appears.
+   * 
+ * + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + * + * @return Whether the segment field is set. + */ + boolean hasSegment(); + /** + * + * + *
+   * Non-streaming batch mode ONLY.
+   * Each object track corresponds to one video segment where it appears.
+   * 
+ * + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + * + * @return The segment. + */ + com.google.cloud.videointelligence.v1p3beta1.VideoSegment getSegment(); + /** + * + * + *
+   * Non-streaming batch mode ONLY.
+   * Each object track corresponds to one video segment where it appears.
+   * 
+ * + * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; + */ + com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder getSegmentOrBuilder(); + + /** + * + * + *
+   * Streaming mode ONLY.
+   * In streaming mode, we do not know the end time of a tracked object
+   * before it is completed. Hence, there is no VideoSegment info returned.
+   * Instead, we provide a unique identifiable integer track_id so that
+   * the customers can correlate the results of the ongoing
+   * ObjectTrackAnnotation of the same track_id over time.
+   * 
+ * + * int64 track_id = 5; + * + * @return The trackId. + */ + long getTrackId(); + /** * * @@ -140,62 +196,6 @@ public interface ObjectTrackingAnnotationOrBuilder com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameOrBuilder getFramesOrBuilder( int index); - /** - * - * - *
-   * Non-streaming batch mode ONLY.
-   * Each object track corresponds to one video segment where it appears.
-   * 
- * - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; - * - * @return Whether the segment field is set. - */ - boolean hasSegment(); - /** - * - * - *
-   * Non-streaming batch mode ONLY.
-   * Each object track corresponds to one video segment where it appears.
-   * 
- * - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; - * - * @return The segment. - */ - com.google.cloud.videointelligence.v1p3beta1.VideoSegment getSegment(); - /** - * - * - *
-   * Non-streaming batch mode ONLY.
-   * Each object track corresponds to one video segment where it appears.
-   * 
- * - * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 3; - */ - com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder getSegmentOrBuilder(); - - /** - * - * - *
-   * Streaming mode ONLY.
-   * In streaming mode, we do not know the end time of a tracked object
-   * before it is completed. Hence, there is no VideoSegment info returned.
-   * Instead, we provide a unique identifiable integer track_id so that
-   * the customers can correlate the results of the ongoing
-   * ObjectTrackAnnotation of the same track_id over time.
-   * 
- * - * int64 track_id = 5; - * - * @return The trackId. - */ - long getTrackId(); - public com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.TrackInfoCase getTrackInfoCase(); } diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionAnnotation.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionAnnotation.java index ec4e43fd4..7d6a8e6f9 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionAnnotation.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionAnnotation.java @@ -127,7 +127,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * The trackes that a person is detected.
+   * The detected tracks of a person.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -139,7 +139,7 @@ public java.util.List getTra * * *
-   * The trackes that a person is detected.
+   * The detected tracks of a person.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -152,7 +152,7 @@ public java.util.List getTra * * *
-   * The trackes that a person is detected.
+   * The detected tracks of a person.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -164,7 +164,7 @@ public int getTracksCount() { * * *
-   * The trackes that a person is detected.
+   * The detected tracks of a person.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -176,7 +176,7 @@ public com.google.cloud.videointelligence.v1p3beta1.Track getTracks(int index) { * * *
-   * The trackes that a person is detected.
+   * The detected tracks of a person.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -576,7 +576,7 @@ private void ensureTracksIsMutable() { * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -592,7 +592,7 @@ public java.util.List getTra * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -608,7 +608,7 @@ public int getTracksCount() { * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -624,7 +624,7 @@ public com.google.cloud.videointelligence.v1p3beta1.Track getTracks(int index) { * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -646,7 +646,7 @@ public Builder setTracks(int index, com.google.cloud.videointelligence.v1p3beta1 * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -666,7 +666,7 @@ public Builder setTracks( * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -688,7 +688,7 @@ public Builder addTracks(com.google.cloud.videointelligence.v1p3beta1.Track valu * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -710,7 +710,7 @@ public Builder addTracks(int index, com.google.cloud.videointelligence.v1p3beta1 * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -730,7 +730,7 @@ public Builder addTracks( * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -750,7 +750,7 @@ public Builder addTracks( * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -770,7 +770,7 @@ public Builder addAllTracks( * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -789,7 +789,7 @@ public Builder clearTracks() { * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -808,7 +808,7 @@ public Builder removeTracks(int index) { * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -820,7 +820,7 @@ public com.google.cloud.videointelligence.v1p3beta1.Track.Builder getTracksBuild * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -837,7 +837,7 @@ public com.google.cloud.videointelligence.v1p3beta1.TrackOrBuilder getTracksOrBu * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -854,7 +854,7 @@ public com.google.cloud.videointelligence.v1p3beta1.TrackOrBuilder getTracksOrBu * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -867,7 +867,7 @@ public com.google.cloud.videointelligence.v1p3beta1.Track.Builder addTracksBuild * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -881,7 +881,7 @@ public com.google.cloud.videointelligence.v1p3beta1.Track.Builder addTracksBuild * * *
-     * The trackes that a person is detected.
+     * The detected tracks of a person.
      * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionAnnotationOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionAnnotationOrBuilder.java index d43529316..315ecb9b9 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionAnnotationOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionAnnotationOrBuilder.java @@ -27,7 +27,7 @@ public interface PersonDetectionAnnotationOrBuilder * * *
-   * The trackes that a person is detected.
+   * The detected tracks of a person.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -37,7 +37,7 @@ public interface PersonDetectionAnnotationOrBuilder * * *
-   * The trackes that a person is detected.
+   * The detected tracks of a person.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -47,7 +47,7 @@ public interface PersonDetectionAnnotationOrBuilder * * *
-   * The trackes that a person is detected.
+   * The detected tracks of a person.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -57,7 +57,7 @@ public interface PersonDetectionAnnotationOrBuilder * * *
-   * The trackes that a person is detected.
+   * The detected tracks of a person.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; @@ -68,7 +68,7 @@ public interface PersonDetectionAnnotationOrBuilder * * *
-   * The trackes that a person is detected.
+   * The detected tracks of a person.
    * 
* * repeated .google.cloud.videointelligence.v1p3beta1.Track tracks = 1; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionConfig.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionConfig.java index 1d1bbc050..5b0eed7a0 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionConfig.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionConfig.java @@ -123,7 +123,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Whether bounding boxes be included in the person detection annotation
+   * Whether bounding boxes are included in the person detection annotation
    * output.
    * 
* @@ -142,7 +142,7 @@ public boolean getIncludeBoundingBoxes() { * *
    * Whether to enable pose landmarks detection. Ignored if
-   * 'include_bounding_boxes' is false.
+   * 'include_bounding_boxes' is set to false.
    * 
* * bool include_pose_landmarks = 2; @@ -160,9 +160,9 @@ public boolean getIncludePoseLandmarks() { * *
    * Whether to enable person attributes detection, such as cloth color (black,
-   * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair
-   * color (black, blonde, etc), hair length (long, short, bald), etc.
-   * Ignored if 'include_bounding_boxes' is false.
+   * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+   * etc.
+   * Ignored if 'include_bounding_boxes' is set to false.
    * 
* * bool include_attributes = 3; @@ -534,7 +534,7 @@ public Builder mergeFrom( * * *
-     * Whether bounding boxes be included in the person detection annotation
+     * Whether bounding boxes are included in the person detection annotation
      * output.
      * 
* @@ -549,7 +549,7 @@ public boolean getIncludeBoundingBoxes() { * * *
-     * Whether bounding boxes be included in the person detection annotation
+     * Whether bounding boxes are included in the person detection annotation
      * output.
      * 
* @@ -568,7 +568,7 @@ public Builder setIncludeBoundingBoxes(boolean value) { * * *
-     * Whether bounding boxes be included in the person detection annotation
+     * Whether bounding boxes are included in the person detection annotation
      * output.
      * 
* @@ -589,7 +589,7 @@ public Builder clearIncludeBoundingBoxes() { * *
      * Whether to enable pose landmarks detection. Ignored if
-     * 'include_bounding_boxes' is false.
+     * 'include_bounding_boxes' is set to false.
      * 
* * bool include_pose_landmarks = 2; @@ -604,7 +604,7 @@ public boolean getIncludePoseLandmarks() { * *
      * Whether to enable pose landmarks detection. Ignored if
-     * 'include_bounding_boxes' is false.
+     * 'include_bounding_boxes' is set to false.
      * 
* * bool include_pose_landmarks = 2; @@ -623,7 +623,7 @@ public Builder setIncludePoseLandmarks(boolean value) { * *
      * Whether to enable pose landmarks detection. Ignored if
-     * 'include_bounding_boxes' is false.
+     * 'include_bounding_boxes' is set to false.
      * 
* * bool include_pose_landmarks = 2; @@ -643,9 +643,9 @@ public Builder clearIncludePoseLandmarks() { * *
      * Whether to enable person attributes detection, such as cloth color (black,
-     * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair
-     * color (black, blonde, etc), hair length (long, short, bald), etc.
-     * Ignored if 'include_bounding_boxes' is false.
+     * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+     * etc.
+     * Ignored if 'include_bounding_boxes' is set to false.
      * 
* * bool include_attributes = 3; @@ -660,9 +660,9 @@ public boolean getIncludeAttributes() { * *
      * Whether to enable person attributes detection, such as cloth color (black,
-     * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair
-     * color (black, blonde, etc), hair length (long, short, bald), etc.
-     * Ignored if 'include_bounding_boxes' is false.
+     * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+     * etc.
+     * Ignored if 'include_bounding_boxes' is set to false.
      * 
* * bool include_attributes = 3; @@ -681,9 +681,9 @@ public Builder setIncludeAttributes(boolean value) { * *
      * Whether to enable person attributes detection, such as cloth color (black,
-     * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair
-     * color (black, blonde, etc), hair length (long, short, bald), etc.
-     * Ignored if 'include_bounding_boxes' is false.
+     * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+     * etc.
+     * Ignored if 'include_bounding_boxes' is set to false.
      * 
* * bool include_attributes = 3; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionConfigOrBuilder.java index 4d4742089..dcf4482a2 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionConfigOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/PersonDetectionConfigOrBuilder.java @@ -27,7 +27,7 @@ public interface PersonDetectionConfigOrBuilder * * *
-   * Whether bounding boxes be included in the person detection annotation
+   * Whether bounding boxes are included in the person detection annotation
    * output.
    * 
* @@ -42,7 +42,7 @@ public interface PersonDetectionConfigOrBuilder * *
    * Whether to enable pose landmarks detection. Ignored if
-   * 'include_bounding_boxes' is false.
+   * 'include_bounding_boxes' is set to false.
    * 
* * bool include_pose_landmarks = 2; @@ -56,9 +56,9 @@ public interface PersonDetectionConfigOrBuilder * *
    * Whether to enable person attributes detection, such as cloth color (black,
-   * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair
-   * color (black, blonde, etc), hair length (long, short, bald), etc.
-   * Ignored if 'include_bounding_boxes' is false.
+   * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+   * etc.
+   * Ignored if 'include_bounding_boxes' is set to false.
    * 
* * bool include_attributes = 3; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechRecognitionAlternative.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechRecognitionAlternative.java index b13e09065..993c6a551 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechRecognitionAlternative.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechRecognitionAlternative.java @@ -212,8 +212,8 @@ public float getConfidence() { * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -228,8 +228,8 @@ public java.util.List get * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -245,8 +245,8 @@ public java.util.List get * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -261,8 +261,8 @@ public int getWordsCount() { * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -277,8 +277,8 @@ public com.google.cloud.videointelligence.v1p3beta1.WordInfo getWords(int index) * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -891,8 +891,8 @@ private void ensureWordsIsMutable() { * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -911,8 +911,8 @@ public java.util.List get * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -931,8 +931,8 @@ public int getWordsCount() { * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -951,8 +951,8 @@ public com.google.cloud.videointelligence.v1p3beta1.WordInfo getWords(int index) * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -978,8 +978,8 @@ public Builder setWords( * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1002,8 +1002,8 @@ public Builder setWords( * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1028,8 +1028,8 @@ public Builder addWords(com.google.cloud.videointelligence.v1p3beta1.WordInfo va * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1055,8 +1055,8 @@ public Builder addWords( * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1079,8 +1079,8 @@ public Builder addWords( * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1103,8 +1103,8 @@ public Builder addWords( * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1128,8 +1128,8 @@ public Builder addAllWords( * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1151,8 +1151,8 @@ public Builder clearWords() { * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1174,8 +1174,8 @@ public Builder removeWords(int index) { * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1191,8 +1191,8 @@ public com.google.cloud.videointelligence.v1p3beta1.WordInfo.Builder getWordsBui * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1212,8 +1212,8 @@ public com.google.cloud.videointelligence.v1p3beta1.WordInfoOrBuilder getWordsOr * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1233,8 +1233,8 @@ public com.google.cloud.videointelligence.v1p3beta1.WordInfoOrBuilder getWordsOr * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1250,8 +1250,8 @@ public com.google.cloud.videointelligence.v1p3beta1.WordInfo.Builder addWordsBui * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1269,8 +1269,8 @@ public com.google.cloud.videointelligence.v1p3beta1.WordInfo.Builder addWordsBui * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechRecognitionAlternativeOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechRecognitionAlternativeOrBuilder.java index 65b7901fd..e9f89d22a 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechRecognitionAlternativeOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechRecognitionAlternativeOrBuilder.java @@ -71,8 +71,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -85,8 +85,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -99,8 +99,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -113,8 +113,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -128,8 +128,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechTranscriptionConfig.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechTranscriptionConfig.java index 99f3adaec..037643d53 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechTranscriptionConfig.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechTranscriptionConfig.java @@ -444,7 +444,7 @@ public int getAudioTracks(int index) { * the top alternative of the recognition result using a speaker_tag provided * in the WordInfo. * Note: When this is true, we send all the words from the beginning of the - * audio for the top alternative in every consecutive responses. + * audio for the top alternative in every consecutive response. * This is done in order to improve our speaker tags as our models learn to * identify the speakers in the conversation over time. * @@ -1877,7 +1877,7 @@ public Builder clearAudioTracks() { * the top alternative of the recognition result using a speaker_tag provided * in the WordInfo. * Note: When this is true, we send all the words from the beginning of the - * audio for the top alternative in every consecutive responses. + * audio for the top alternative in every consecutive response. * This is done in order to improve our speaker tags as our models learn to * identify the speakers in the conversation over time. * @@ -1897,7 +1897,7 @@ public boolean getEnableSpeakerDiarization() { * the top alternative of the recognition result using a speaker_tag provided * in the WordInfo. * Note: When this is true, we send all the words from the beginning of the - * audio for the top alternative in every consecutive responses. + * audio for the top alternative in every consecutive response. 
* This is done in order to improve our speaker tags as our models learn to * identify the speakers in the conversation over time. * @@ -1921,7 +1921,7 @@ public Builder setEnableSpeakerDiarization(boolean value) { * the top alternative of the recognition result using a speaker_tag provided * in the WordInfo. * Note: When this is true, we send all the words from the beginning of the - * audio for the top alternative in every consecutive responses. + * audio for the top alternative in every consecutive response. * This is done in order to improve our speaker tags as our models learn to * identify the speakers in the conversation over time. * diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechTranscriptionConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechTranscriptionConfigOrBuilder.java index 8d745362a..23bf8e4d3 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechTranscriptionConfigOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/SpeechTranscriptionConfigOrBuilder.java @@ -220,7 +220,7 @@ com.google.cloud.videointelligence.v1p3beta1.SpeechContextOrBuilder getSpeechCon * the top alternative of the recognition result using a speaker_tag provided * in the WordInfo. * Note: When this is true, we send all the words from the beginning of the - * audio for the top alternative in every consecutive responses. + * audio for the top alternative in every consecutive response. * This is done in order to improve our speaker tags as our models learn to * identify the speakers in the conversation over time. 
* diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAnnotateVideoResponse.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAnnotateVideoResponse.java index 8efb2e9a6..5f1d65cfa 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAnnotateVideoResponse.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAnnotateVideoResponse.java @@ -255,10 +255,10 @@ public boolean hasAnnotationResults() { * * *
-   * GCS URI that stores annotation results of one streaming session.
-   * It is a directory that can hold multiple files in JSON format.
-   * Example uri format:
-   * gs://bucket_id/object_id/cloud_project_name-session_id
+   * Google Cloud Storage (GCS) URI that stores annotation results of one
+   * streaming session in JSON format.
+   * It is the annotation_result_storage_directory
+   * from the request followed by '/cloud_project_number-session_id'.
    * 
* * string annotation_results_uri = 3; @@ -280,10 +280,10 @@ public java.lang.String getAnnotationResultsUri() { * * *
-   * GCS URI that stores annotation results of one streaming session.
-   * It is a directory that can hold multiple files in JSON format.
-   * Example uri format:
-   * gs://bucket_id/object_id/cloud_project_name-session_id
+   * Google Cloud Storage (GCS) URI that stores annotation results of one
+   * streaming session in JSON format.
+   * It is the annotation_result_storage_directory
+   * from the request followed by '/cloud_project_number-session_id'.
    * 
* * string annotation_results_uri = 3; @@ -1101,10 +1101,10 @@ public Builder clearAnnotationResults() { * * *
-     * GCS URI that stores annotation results of one streaming session.
-     * It is a directory that can hold multiple files in JSON format.
-     * Example uri format:
-     * gs://bucket_id/object_id/cloud_project_name-session_id
+     * Google Cloud Storage (GCS) URI that stores annotation results of one
+     * streaming session in JSON format.
+     * It is the annotation_result_storage_directory
+     * from the request followed by '/cloud_project_number-session_id'.
      * 
* * string annotation_results_uri = 3; @@ -1126,10 +1126,10 @@ public java.lang.String getAnnotationResultsUri() { * * *
-     * GCS URI that stores annotation results of one streaming session.
-     * It is a directory that can hold multiple files in JSON format.
-     * Example uri format:
-     * gs://bucket_id/object_id/cloud_project_name-session_id
+     * Google Cloud Storage (GCS) URI that stores annotation results of one
+     * streaming session in JSON format.
+     * It is the annotation_result_storage_directory
+     * from the request followed by '/cloud_project_number-session_id'.
      * 
* * string annotation_results_uri = 3; @@ -1151,10 +1151,10 @@ public com.google.protobuf.ByteString getAnnotationResultsUriBytes() { * * *
-     * GCS URI that stores annotation results of one streaming session.
-     * It is a directory that can hold multiple files in JSON format.
-     * Example uri format:
-     * gs://bucket_id/object_id/cloud_project_name-session_id
+     * Google Cloud Storage (GCS) URI that stores annotation results of one
+     * streaming session in JSON format.
+     * It is the annotation_result_storage_directory
+     * from the request followed by '/cloud_project_number-session_id'.
      * 
* * string annotation_results_uri = 3; @@ -1175,10 +1175,10 @@ public Builder setAnnotationResultsUri(java.lang.String value) { * * *
-     * GCS URI that stores annotation results of one streaming session.
-     * It is a directory that can hold multiple files in JSON format.
-     * Example uri format:
-     * gs://bucket_id/object_id/cloud_project_name-session_id
+     * Google Cloud Storage (GCS) URI that stores annotation results of one
+     * streaming session in JSON format.
+     * It is the annotation_result_storage_directory
+     * from the request followed by '/cloud_project_number-session_id'.
      * 
* * string annotation_results_uri = 3; @@ -1195,10 +1195,10 @@ public Builder clearAnnotationResultsUri() { * * *
-     * GCS URI that stores annotation results of one streaming session.
-     * It is a directory that can hold multiple files in JSON format.
-     * Example uri format:
-     * gs://bucket_id/object_id/cloud_project_name-session_id
+     * Google Cloud Storage (GCS) URI that stores annotation results of one
+     * streaming session in JSON format.
+     * It is the annotation_result_storage_directory
+     * from the request followed by '/cloud_project_number-session_id'.
      * 
* * string annotation_results_uri = 3; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAnnotateVideoResponseOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAnnotateVideoResponseOrBuilder.java index f5e079d9e..f1b2677bc 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAnnotateVideoResponseOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAnnotateVideoResponseOrBuilder.java @@ -108,10 +108,10 @@ public interface StreamingAnnotateVideoResponseOrBuilder * * *
-   * GCS URI that stores annotation results of one streaming session.
-   * It is a directory that can hold multiple files in JSON format.
-   * Example uri format:
-   * gs://bucket_id/object_id/cloud_project_name-session_id
+   * Google Cloud Storage (GCS) URI that stores annotation results of one
+   * streaming session in JSON format.
+   * It is the annotation_result_storage_directory
+   * from the request followed by '/cloud_project_number-session_id'.
    * 
* * string annotation_results_uri = 3; @@ -123,10 +123,10 @@ public interface StreamingAnnotateVideoResponseOrBuilder * * *
-   * GCS URI that stores annotation results of one streaming session.
-   * It is a directory that can hold multiple files in JSON format.
-   * Example uri format:
-   * gs://bucket_id/object_id/cloud_project_name-session_id
+   * Google Cloud Storage (GCS) URI that stores annotation results of one
+   * streaming session in JSON format.
+   * It is the annotation_result_storage_directory
+   * from the request followed by '/cloud_project_number-session_id'.
    * 
* * string annotation_results_uri = 3; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlActionRecognitionConfig.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlActionRecognitionConfig.java new file mode 100644 index 000000000..d63f7d5e7 --- /dev/null +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlActionRecognitionConfig.java @@ -0,0 +1,679 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/videointelligence/v1p3beta1/video_intelligence.proto + +package com.google.cloud.videointelligence.v1p3beta1; + +/** + * + * + *
+ * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+ * 
+ * + * Protobuf type {@code + * google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig} + */ +public final class StreamingAutomlActionRecognitionConfig + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + StreamingAutomlActionRecognitionConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use StreamingAutomlActionRecognitionConfig.newBuilder() to construct. + private StreamingAutomlActionRecognitionConfig( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamingAutomlActionRecognitionConfig() { + modelName_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamingAutomlActionRecognitionConfig(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private StreamingAutomlActionRecognitionConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + modelName_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch 
(java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlActionRecognitionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlActionRecognitionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .class, + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .Builder.class); + } + + public static final int MODEL_NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object modelName_; + /** + * + * + *
+   * Resource name of AutoML model.
+   * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+   * 
+ * + * string model_name = 1; + * + * @return The modelName. + */ + public java.lang.String getModelName() { + java.lang.Object ref = modelName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + modelName_ = s; + return s; + } + } + /** + * + * + *
+   * Resource name of AutoML model.
+   * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+   * 
+ * + * string model_name = 1; + * + * @return The bytes for modelName. + */ + public com.google.protobuf.ByteString getModelNameBytes() { + java.lang.Object ref = modelName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + modelName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getModelNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, modelName_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getModelNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, modelName_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig)) { + return super.equals(obj); + } + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig other = + (com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) obj; + + if (!getModelName().equals(other.getModelName())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if 
(memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MODEL_NAME_FIELD_NUMBER; + hash = (53 * hash) + getModelName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + 
public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static 
Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+   * 
+ * + * Protobuf type {@code + * google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlActionRecognitionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlActionRecognitionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .class, + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .Builder.class); + } + + // Construct using + // com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + modelName_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlActionRecognitionConfig_descriptor; + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + getDefaultInstanceForType() { + return com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + build() { + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + buildPartial() { + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig result = + new com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig( + this); + result.modelName_ = modelName_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + 
public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) { + return mergeFrom( + (com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig other) { + if (other + == com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .getDefaultInstance()) return this; + if (!other.getModelName().isEmpty()) { + modelName_ = other.modelName_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object modelName_ = ""; + /** + * + * + *
+     * Resource name of AutoML model.
+     * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+     * 
+ * + * string model_name = 1; + * + * @return The modelName. + */ + public java.lang.String getModelName() { + java.lang.Object ref = modelName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + modelName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Resource name of AutoML model.
+     * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+     * 
+ * + * string model_name = 1; + * + * @return The bytes for modelName. + */ + public com.google.protobuf.ByteString getModelNameBytes() { + java.lang.Object ref = modelName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + modelName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Resource name of AutoML model.
+     * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+     * 
+ * + * string model_name = 1; + * + * @param value The modelName to set. + * @return This builder for chaining. + */ + public Builder setModelName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + modelName_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Resource name of AutoML model.
+     * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+     * 
+ * + * string model_name = 1; + * + * @return This builder for chaining. + */ + public Builder clearModelName() { + + modelName_ = getDefaultInstance().getModelName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Resource name of AutoML model.
+     * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+     * 
+ * + * string model_name = 1; + * + * @param value The bytes for modelName to set. + * @return This builder for chaining. + */ + public Builder setModelNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + modelName_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + private static final com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfig + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig(); + } + + public static com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamingAutomlActionRecognitionConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StreamingAutomlActionRecognitionConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { 
+ return PARSER; + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlActionRecognitionConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlActionRecognitionConfigOrBuilder.java new file mode 100644 index 000000000..56f53d3b3 --- /dev/null +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlActionRecognitionConfigOrBuilder.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/videointelligence/v1p3beta1/video_intelligence.proto + +package com.google.cloud.videointelligence.v1p3beta1; + +public interface StreamingAutomlActionRecognitionConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Resource name of AutoML model.
+   * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+   * 
+ * + * string model_name = 1; + * + * @return The modelName. + */ + java.lang.String getModelName(); + /** + * + * + *
+   * Resource name of AutoML model.
+   * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+   * 
+ * + * string model_name = 1; + * + * @return The bytes for modelName. + */ + com.google.protobuf.ByteString getModelNameBytes(); +} diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlClassificationConfig.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlClassificationConfig.java index 86688f1e6..7733bada6 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlClassificationConfig.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlClassificationConfig.java @@ -122,7 +122,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * Resource name of AutoML model.
-   * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+   * Format:
+   * `projects/{project_number}/locations/{location_id}/models/{model_id}`
    * 
* * string model_name = 1; @@ -145,7 +146,8 @@ public java.lang.String getModelName() { * *
    * Resource name of AutoML model.
-   * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+   * Format:
+   * `projects/{project_number}/locations/{location_id}/models/{model_id}`
    * 
* * string model_name = 1; @@ -515,7 +517,8 @@ public Builder mergeFrom( * *
      * Resource name of AutoML model.
-     * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+     * Format:
+     * `projects/{project_number}/locations/{location_id}/models/{model_id}`
      * 
* * string model_name = 1; @@ -538,7 +541,8 @@ public java.lang.String getModelName() { * *
      * Resource name of AutoML model.
-     * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+     * Format:
+     * `projects/{project_number}/locations/{location_id}/models/{model_id}`
      * 
* * string model_name = 1; @@ -561,7 +565,8 @@ public com.google.protobuf.ByteString getModelNameBytes() { * *
      * Resource name of AutoML model.
-     * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+     * Format:
+     * `projects/{project_number}/locations/{location_id}/models/{model_id}`
      * 
* * string model_name = 1; @@ -583,7 +588,8 @@ public Builder setModelName(java.lang.String value) { * *
      * Resource name of AutoML model.
-     * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+     * Format:
+     * `projects/{project_number}/locations/{location_id}/models/{model_id}`
      * 
* * string model_name = 1; @@ -601,7 +607,8 @@ public Builder clearModelName() { * *
      * Resource name of AutoML model.
-     * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+     * Format:
+     * `projects/{project_number}/locations/{location_id}/models/{model_id}`
      * 
* * string model_name = 1; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlClassificationConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlClassificationConfigOrBuilder.java index 8625e8704..017b12a71 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlClassificationConfigOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingAutomlClassificationConfigOrBuilder.java @@ -28,7 +28,8 @@ public interface StreamingAutomlClassificationConfigOrBuilder * *
    * Resource name of AutoML model.
-   * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+   * Format:
+   * `projects/{project_number}/locations/{location_id}/models/{model_id}`
    * 
* * string model_name = 1; @@ -41,7 +42,8 @@ public interface StreamingAutomlClassificationConfigOrBuilder * *
    * Resource name of AutoML model.
-   * Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+   * Format:
+   * `projects/{project_number}/locations/{location_id}/models/{model_id}`
    * 
* * string model_name = 1; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingFeature.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingFeature.java index 268c936e3..962ba7a7f 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingFeature.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingFeature.java @@ -78,6 +78,16 @@ public enum StreamingFeature implements com.google.protobuf.ProtocolMessageEnum * STREAMING_OBJECT_TRACKING = 4; */ STREAMING_OBJECT_TRACKING(4), + /** + * + * + *
+   * Action recognition based on AutoML model.
+   * 
+ * + * STREAMING_AUTOML_ACTION_RECOGNITION = 23; + */ + STREAMING_AUTOML_ACTION_RECOGNITION(23), /** * * @@ -151,6 +161,16 @@ public enum StreamingFeature implements com.google.protobuf.ProtocolMessageEnum * STREAMING_OBJECT_TRACKING = 4; */ public static final int STREAMING_OBJECT_TRACKING_VALUE = 4; + /** + * + * + *
+   * Action recognition based on AutoML model.
+   * 
+ * + * STREAMING_AUTOML_ACTION_RECOGNITION = 23; + */ + public static final int STREAMING_AUTOML_ACTION_RECOGNITION_VALUE = 23; /** * * @@ -206,6 +226,8 @@ public static StreamingFeature forNumber(int value) { return STREAMING_EXPLICIT_CONTENT_DETECTION; case 4: return STREAMING_OBJECT_TRACKING; + case 23: + return STREAMING_AUTOML_ACTION_RECOGNITION; case 21: return STREAMING_AUTOML_CLASSIFICATION; case 22: @@ -238,7 +260,7 @@ public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor return com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceProto .getDescriptor() .getEnumTypes() - .get(3); + .get(2); } private static final StreamingFeature[] VALUES = values(); diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingStorageConfig.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingStorageConfig.java index 071104fd7..d8688de31 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingStorageConfig.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingStorageConfig.java @@ -139,16 +139,16 @@ public boolean getEnableStorageAnnotationResult() { * * *
-   * GCS URI to store all annotation results for one client. Client should
-   * specify this field as the top-level storage directory. Annotation results
-   * of different sessions will be put into different sub-directories denoted
-   * by project_name and session_id. All sub-directories will be auto generated
-   * by program and will be made accessible to client in response proto.
-   * URIs must be specified in the following format: `gs://bucket-id/object-id`
-   * `bucket-id` should be a valid GCS bucket created by client and bucket
-   * permission shall also be configured properly. `object-id` can be arbitrary
-   * string that make sense to client. Other URI formats will return error and
-   * cause GCS write failure.
+   * Cloud Storage URI to store all annotation results for one client. Client
+   * should specify this field as the top-level storage directory. Annotation
+   * results of different sessions will be put into different sub-directories
+   * denoted by project_name and session_id. All sub-directories will be auto
+   * generated by program and will be made accessible to client in response
+   * proto. URIs must be specified in the following format:
+   * `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
+   * bucket created by client and bucket permission shall also be configured
+   * properly. `object-id` can be an arbitrary string that makes sense to the
+   * client. Other URI formats will return an error and cause a Cloud Storage write failure.
    * 
* * string annotation_result_storage_directory = 3; @@ -170,16 +170,16 @@ public java.lang.String getAnnotationResultStorageDirectory() { * * *
-   * GCS URI to store all annotation results for one client. Client should
-   * specify this field as the top-level storage directory. Annotation results
-   * of different sessions will be put into different sub-directories denoted
-   * by project_name and session_id. All sub-directories will be auto generated
-   * by program and will be made accessible to client in response proto.
-   * URIs must be specified in the following format: `gs://bucket-id/object-id`
-   * `bucket-id` should be a valid GCS bucket created by client and bucket
-   * permission shall also be configured properly. `object-id` can be arbitrary
-   * string that make sense to client. Other URI formats will return error and
-   * cause GCS write failure.
+   * Cloud Storage URI to store all annotation results for one client. Client
+   * should specify this field as the top-level storage directory. Annotation
+   * results of different sessions will be put into different sub-directories
+   * denoted by project_name and session_id. All sub-directories will be auto
+   * generated by program and will be made accessible to client in response
+   * proto. URIs must be specified in the following format:
+   * `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
+   * bucket created by client and bucket permission shall also be configured
+   * properly. `object-id` can be an arbitrary string that makes sense to the
+   * client. Other URI formats will return an error and cause a Cloud Storage write failure.
    * 
* * string annotation_result_storage_directory = 3; @@ -603,16 +603,16 @@ public Builder clearEnableStorageAnnotationResult() { * * *
-     * GCS URI to store all annotation results for one client. Client should
-     * specify this field as the top-level storage directory. Annotation results
-     * of different sessions will be put into different sub-directories denoted
-     * by project_name and session_id. All sub-directories will be auto generated
-     * by program and will be made accessible to client in response proto.
-     * URIs must be specified in the following format: `gs://bucket-id/object-id`
-     * `bucket-id` should be a valid GCS bucket created by client and bucket
-     * permission shall also be configured properly. `object-id` can be arbitrary
-     * string that make sense to client. Other URI formats will return error and
-     * cause GCS write failure.
+     * Cloud Storage URI to store all annotation results for one client. Client
+     * should specify this field as the top-level storage directory. Annotation
+     * results of different sessions will be put into different sub-directories
+     * denoted by project_name and session_id. All sub-directories will be auto
+     * generated by program and will be made accessible to client in response
+     * proto. URIs must be specified in the following format:
+     * `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
+     * bucket created by client and bucket permission shall also be configured
+     * properly. `object-id` can be an arbitrary string that makes sense to the client.
+     * Other URI formats will return an error and cause a Cloud Storage write failure.
      * 
* * string annotation_result_storage_directory = 3; @@ -634,16 +634,16 @@ public java.lang.String getAnnotationResultStorageDirectory() { * * *
-     * GCS URI to store all annotation results for one client. Client should
-     * specify this field as the top-level storage directory. Annotation results
-     * of different sessions will be put into different sub-directories denoted
-     * by project_name and session_id. All sub-directories will be auto generated
-     * by program and will be made accessible to client in response proto.
-     * URIs must be specified in the following format: `gs://bucket-id/object-id`
-     * `bucket-id` should be a valid GCS bucket created by client and bucket
-     * permission shall also be configured properly. `object-id` can be arbitrary
-     * string that make sense to client. Other URI formats will return error and
-     * cause GCS write failure.
+     * Cloud Storage URI to store all annotation results for one client. Client
+     * should specify this field as the top-level storage directory. Annotation
+     * results of different sessions will be put into different sub-directories
+     * denoted by project_name and session_id. All sub-directories will be auto
+     * generated by program and will be made accessible to client in response
+     * proto. URIs must be specified in the following format:
+     * `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
+     * bucket created by client and bucket permission shall also be configured
+     * properly. `object-id` can be an arbitrary string that makes sense to the client.
+     * Other URI formats will return an error and cause a Cloud Storage write failure.
      * 
* * string annotation_result_storage_directory = 3; @@ -665,16 +665,16 @@ public com.google.protobuf.ByteString getAnnotationResultStorageDirectoryBytes() * * *
-     * GCS URI to store all annotation results for one client. Client should
-     * specify this field as the top-level storage directory. Annotation results
-     * of different sessions will be put into different sub-directories denoted
-     * by project_name and session_id. All sub-directories will be auto generated
-     * by program and will be made accessible to client in response proto.
-     * URIs must be specified in the following format: `gs://bucket-id/object-id`
-     * `bucket-id` should be a valid GCS bucket created by client and bucket
-     * permission shall also be configured properly. `object-id` can be arbitrary
-     * string that make sense to client. Other URI formats will return error and
-     * cause GCS write failure.
+     * Cloud Storage URI to store all annotation results for one client. Client
+     * should specify this field as the top-level storage directory. Annotation
+     * results of different sessions will be put into different sub-directories
+     * denoted by project_name and session_id. All sub-directories will be auto
+     * generated by program and will be made accessible to client in response
+     * proto. URIs must be specified in the following format:
+     * `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
+     * bucket created by client and bucket permission shall also be configured
+     * properly. `object-id` can be an arbitrary string that makes sense to the client.
+     * Other URI formats will return an error and cause a Cloud Storage write failure.
      * 
* * string annotation_result_storage_directory = 3; @@ -695,16 +695,16 @@ public Builder setAnnotationResultStorageDirectory(java.lang.String value) { * * *
-     * GCS URI to store all annotation results for one client. Client should
-     * specify this field as the top-level storage directory. Annotation results
-     * of different sessions will be put into different sub-directories denoted
-     * by project_name and session_id. All sub-directories will be auto generated
-     * by program and will be made accessible to client in response proto.
-     * URIs must be specified in the following format: `gs://bucket-id/object-id`
-     * `bucket-id` should be a valid GCS bucket created by client and bucket
-     * permission shall also be configured properly. `object-id` can be arbitrary
-     * string that make sense to client. Other URI formats will return error and
-     * cause GCS write failure.
+     * Cloud Storage URI to store all annotation results for one client. Client
+     * should specify this field as the top-level storage directory. Annotation
+     * results of different sessions will be put into different sub-directories
+     * denoted by project_name and session_id. All sub-directories will be auto
+     * generated by program and will be made accessible to client in response
+     * proto. URIs must be specified in the following format:
+     * `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
+     * bucket created by client and bucket permission shall also be configured
+     * properly. `object-id` can be an arbitrary string that makes sense to the client.
+     * Other URI formats will return an error and cause a Cloud Storage write failure.
      * 
* * string annotation_result_storage_directory = 3; @@ -722,16 +722,16 @@ public Builder clearAnnotationResultStorageDirectory() { * * *
-     * GCS URI to store all annotation results for one client. Client should
-     * specify this field as the top-level storage directory. Annotation results
-     * of different sessions will be put into different sub-directories denoted
-     * by project_name and session_id. All sub-directories will be auto generated
-     * by program and will be made accessible to client in response proto.
-     * URIs must be specified in the following format: `gs://bucket-id/object-id`
-     * `bucket-id` should be a valid GCS bucket created by client and bucket
-     * permission shall also be configured properly. `object-id` can be arbitrary
-     * string that make sense to client. Other URI formats will return error and
-     * cause GCS write failure.
+     * Cloud Storage URI to store all annotation results for one client. Client
+     * should specify this field as the top-level storage directory. Annotation
+     * results of different sessions will be put into different sub-directories
+     * denoted by project_name and session_id. All sub-directories will be auto
+     * generated by program and will be made accessible to client in response
+     * proto. URIs must be specified in the following format:
+     * `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
+     * bucket created by client and bucket permission shall also be configured
+     * properly. `object-id` can be an arbitrary string that makes sense to the client.
+     * Other URI formats will return an error and cause a Cloud Storage write failure.
      * 
* * string annotation_result_storage_directory = 3; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingStorageConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingStorageConfigOrBuilder.java index cd8478068..8d6e62d4f 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingStorageConfigOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingStorageConfigOrBuilder.java @@ -40,16 +40,16 @@ public interface StreamingStorageConfigOrBuilder * * *
-   * GCS URI to store all annotation results for one client. Client should
-   * specify this field as the top-level storage directory. Annotation results
-   * of different sessions will be put into different sub-directories denoted
-   * by project_name and session_id. All sub-directories will be auto generated
-   * by program and will be made accessible to client in response proto.
-   * URIs must be specified in the following format: `gs://bucket-id/object-id`
-   * `bucket-id` should be a valid GCS bucket created by client and bucket
-   * permission shall also be configured properly. `object-id` can be arbitrary
-   * string that make sense to client. Other URI formats will return error and
-   * cause GCS write failure.
+   * Cloud Storage URI to store all annotation results for one client. Client
+   * should specify this field as the top-level storage directory. Annotation
+   * results of different sessions will be put into different sub-directories
+   * denoted by project_name and session_id. All sub-directories will be auto
+   * generated by program and will be made accessible to client in response
+   * proto. URIs must be specified in the following format:
+   * `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
+   * bucket created by client and bucket permission shall also be configured
+   * properly. `object-id` can be an arbitrary string that makes sense to the client.
+   * Other URI formats will return an error and cause a Cloud Storage write failure.
    * 
* * string annotation_result_storage_directory = 3; @@ -61,16 +61,16 @@ public interface StreamingStorageConfigOrBuilder * * *
-   * GCS URI to store all annotation results for one client. Client should
-   * specify this field as the top-level storage directory. Annotation results
-   * of different sessions will be put into different sub-directories denoted
-   * by project_name and session_id. All sub-directories will be auto generated
-   * by program and will be made accessible to client in response proto.
-   * URIs must be specified in the following format: `gs://bucket-id/object-id`
-   * `bucket-id` should be a valid GCS bucket created by client and bucket
-   * permission shall also be configured properly. `object-id` can be arbitrary
-   * string that make sense to client. Other URI formats will return error and
-   * cause GCS write failure.
+   * Cloud Storage URI to store all annotation results for one client. Client
+   * should specify this field as the top-level storage directory. Annotation
+   * results of different sessions will be put into different sub-directories
+   * denoted by project_name and session_id. All sub-directories will be auto
+   * generated by program and will be made accessible to client in response
+   * proto. URIs must be specified in the following format:
+   * `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
+   * bucket created by client and bucket permission shall also be configured
+   * properly. `object-id` can be an arbitrary string that makes sense to the client.
+   * Other URI formats will return an error and cause a Cloud Storage write failure.
    * 
* * string annotation_result_storage_directory = 3; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoConfig.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoConfig.java index 740a77c67..a21d8a34c 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoConfig.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoConfig.java @@ -234,6 +234,33 @@ private StreamingVideoConfig( streamingConfigCase_ = 22; break; } + case 186: + { + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .Builder + subBuilder = null; + if (streamingConfigCase_ == 23) { + subBuilder = + ((com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfig) + streamingConfig_) + .toBuilder(); + } + streamingConfig_ = + input.readMessage( + com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfig.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom( + (com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfig) + streamingConfig_); + streamingConfig_ = subBuilder.buildPartial(); + } + streamingConfigCase_ = 23; + break; + } case 242: { com.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig.Builder @@ -297,6 +324,7 @@ public enum StreamingConfigCase LABEL_DETECTION_CONFIG(3), EXPLICIT_CONTENT_DETECTION_CONFIG(4), OBJECT_TRACKING_CONFIG(5), + AUTOML_ACTION_RECOGNITION_CONFIG(23), AUTOML_CLASSIFICATION_CONFIG(21), AUTOML_OBJECT_TRACKING_CONFIG(22), STREAMINGCONFIG_NOT_SET(0); @@ -325,6 +353,8 @@ public static StreamingConfigCase forNumber(int value) { return EXPLICIT_CONTENT_DETECTION_CONFIG; case 5: return OBJECT_TRACKING_CONFIG; + 
case 23: + return AUTOML_ACTION_RECOGNITION_CONFIG; case 21: return AUTOML_CLASSIFICATION_CONFIG; case 22: @@ -345,42 +375,6 @@ public StreamingConfigCase getStreamingConfigCase() { return StreamingConfigCase.forNumber(streamingConfigCase_); } - public static final int FEATURE_FIELD_NUMBER = 1; - private int feature_; - /** - * - * - *
-   * Requested annotation feature.
-   * 
- * - * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; - * - * @return The enum numeric value on the wire for feature. - */ - public int getFeatureValue() { - return feature_; - } - /** - * - * - *
-   * Requested annotation feature.
-   * 
- * - * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; - * - * @return The feature. - */ - public com.google.cloud.videointelligence.v1p3beta1.StreamingFeature getFeature() { - @SuppressWarnings("deprecation") - com.google.cloud.videointelligence.v1p3beta1.StreamingFeature result = - com.google.cloud.videointelligence.v1p3beta1.StreamingFeature.valueOf(feature_); - return result == null - ? com.google.cloud.videointelligence.v1p3beta1.StreamingFeature.UNRECOGNIZED - : result; - } - public static final int SHOT_CHANGE_DETECTION_CONFIG_FIELD_NUMBER = 2; /** * @@ -622,6 +616,67 @@ public boolean hasObjectTrackingConfig() { .getDefaultInstance(); } + public static final int AUTOML_ACTION_RECOGNITION_CONFIG_FIELD_NUMBER = 23; + /** + * + * + *
+   * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+   * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + * + * @return Whether the automlActionRecognitionConfig field is set. + */ + public boolean hasAutomlActionRecognitionConfig() { + return streamingConfigCase_ == 23; + } + /** + * + * + *
+   * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+   * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + * + * @return The automlActionRecognitionConfig. + */ + public com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + getAutomlActionRecognitionConfig() { + if (streamingConfigCase_ == 23) { + return (com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + streamingConfig_; + } + return com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .getDefaultInstance(); + } + /** + * + * + *
+   * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+   * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + */ + public com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfigOrBuilder + getAutomlActionRecognitionConfigOrBuilder() { + if (streamingConfigCase_ == 23) { + return (com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + streamingConfig_; + } + return com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .getDefaultInstance(); + } + public static final int AUTOML_CLASSIFICATION_CONFIG_FIELD_NUMBER = 21; /** * @@ -742,6 +797,42 @@ public boolean hasAutomlObjectTrackingConfig() { .getDefaultInstance(); } + public static final int FEATURE_FIELD_NUMBER = 1; + private int feature_; + /** + * + * + *
+   * Requested annotation feature.
+   * 
+ * + * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; + * + * @return The enum numeric value on the wire for feature. + */ + public int getFeatureValue() { + return feature_; + } + /** + * + * + *
+   * Requested annotation feature.
+   * 
+ * + * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; + * + * @return The feature. + */ + public com.google.cloud.videointelligence.v1p3beta1.StreamingFeature getFeature() { + @SuppressWarnings("deprecation") + com.google.cloud.videointelligence.v1p3beta1.StreamingFeature result = + com.google.cloud.videointelligence.v1p3beta1.StreamingFeature.valueOf(feature_); + return result == null + ? com.google.cloud.videointelligence.v1p3beta1.StreamingFeature.UNRECOGNIZED + : result; + } + public static final int STORAGE_CONFIG_FIELD_NUMBER = 30; private com.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig storageConfig_; /** @@ -847,6 +938,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io (com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig) streamingConfig_); } + if (streamingConfigCase_ == 23) { + output.writeMessage( + 23, + (com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + streamingConfig_); + } if (storageConfig_ != null) { output.writeMessage(30, getStorageConfig()); } @@ -907,6 +1004,13 @@ public int getSerializedSize() { (com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig) streamingConfig_); } + if (streamingConfigCase_ == 23) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 23, + (com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) + streamingConfig_); + } if (storageConfig_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(30, getStorageConfig()); } @@ -947,6 +1051,10 @@ public boolean equals(final java.lang.Object obj) { case 5: if (!getObjectTrackingConfig().equals(other.getObjectTrackingConfig())) return false; break; + case 23: + if (!getAutomlActionRecognitionConfig().equals(other.getAutomlActionRecognitionConfig())) + return false; + break; case 21: if 
(!getAutomlClassificationConfig().equals(other.getAutomlClassificationConfig())) return false; @@ -992,6 +1100,10 @@ public int hashCode() { hash = (37 * hash) + OBJECT_TRACKING_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getObjectTrackingConfig().hashCode(); break; + case 23: + hash = (37 * hash) + AUTOML_ACTION_RECOGNITION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getAutomlActionRecognitionConfig().hashCode(); + break; case 21: hash = (37 * hash) + AUTOML_CLASSIFICATION_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getAutomlClassificationConfig().hashCode(); @@ -1190,7 +1302,6 @@ public com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig build() public com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig buildPartial() { com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig result = new com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig(this); - result.feature_ = feature_; if (streamingConfigCase_ == 2) { if (shotChangeDetectionConfigBuilder_ == null) { result.streamingConfig_ = streamingConfig_; @@ -1219,6 +1330,13 @@ public com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig buildPa result.streamingConfig_ = objectTrackingConfigBuilder_.build(); } } + if (streamingConfigCase_ == 23) { + if (automlActionRecognitionConfigBuilder_ == null) { + result.streamingConfig_ = streamingConfig_; + } else { + result.streamingConfig_ = automlActionRecognitionConfigBuilder_.build(); + } + } if (streamingConfigCase_ == 21) { if (automlClassificationConfigBuilder_ == null) { result.streamingConfig_ = streamingConfig_; @@ -1233,6 +1351,7 @@ public com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig buildPa result.streamingConfig_ = automlObjectTrackingConfigBuilder_.build(); } } + result.feature_ = feature_; if (storageConfigBuilder_ == null) { result.storageConfig_ = storageConfig_; } else { @@ -1318,6 +1437,11 @@ public Builder mergeFrom( mergeObjectTrackingConfig(other.getObjectTrackingConfig()); 
break; } + case AUTOML_ACTION_RECOGNITION_CONFIG: + { + mergeAutomlActionRecognitionConfig(other.getAutomlActionRecognitionConfig()); + break; + } case AUTOML_CLASSIFICATION_CONFIG: { mergeAutomlClassificationConfig(other.getAutomlClassificationConfig()); @@ -1378,96 +1502,6 @@ public Builder clearStreamingConfig() { return this; } - private int feature_ = 0; - /** - * - * - *
-     * Requested annotation feature.
-     * 
- * - * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; - * - * @return The enum numeric value on the wire for feature. - */ - public int getFeatureValue() { - return feature_; - } - /** - * - * - *
-     * Requested annotation feature.
-     * 
- * - * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; - * - * @param value The enum numeric value on the wire for feature to set. - * @return This builder for chaining. - */ - public Builder setFeatureValue(int value) { - feature_ = value; - onChanged(); - return this; - } - /** - * - * - *
-     * Requested annotation feature.
-     * 
- * - * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; - * - * @return The feature. - */ - public com.google.cloud.videointelligence.v1p3beta1.StreamingFeature getFeature() { - @SuppressWarnings("deprecation") - com.google.cloud.videointelligence.v1p3beta1.StreamingFeature result = - com.google.cloud.videointelligence.v1p3beta1.StreamingFeature.valueOf(feature_); - return result == null - ? com.google.cloud.videointelligence.v1p3beta1.StreamingFeature.UNRECOGNIZED - : result; - } - /** - * - * - *
-     * Requested annotation feature.
-     * 
- * - * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; - * - * @param value The feature to set. - * @return This builder for chaining. - */ - public Builder setFeature(com.google.cloud.videointelligence.v1p3beta1.StreamingFeature value) { - if (value == null) { - throw new NullPointerException(); - } - - feature_ = value.getNumber(); - onChanged(); - return this; - } - /** - * - * - *
-     * Requested annotation feature.
-     * 
- * - * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; - * - * @return This builder for chaining. - */ - public Builder clearFeature() { - - feature_ = 0; - onChanged(); - return this; - } - private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig, com.google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig.Builder, @@ -2456,6 +2490,260 @@ public Builder clearObjectTrackingConfig() { return objectTrackingConfigBuilder_; } + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig, + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .Builder, + com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfigOrBuilder> + automlActionRecognitionConfigBuilder_; + /** + * + * + *
+     * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+     * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + * + * @return Whether the automlActionRecognitionConfig field is set. + */ + public boolean hasAutomlActionRecognitionConfig() { + return streamingConfigCase_ == 23; + } + /** + * + * + *
+     * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+     * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + * + * @return The automlActionRecognitionConfig. + */ + public com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + getAutomlActionRecognitionConfig() { + if (automlActionRecognitionConfigBuilder_ == null) { + if (streamingConfigCase_ == 23) { + return (com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfig) + streamingConfig_; + } + return com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .getDefaultInstance(); + } else { + if (streamingConfigCase_ == 23) { + return automlActionRecognitionConfigBuilder_.getMessage(); + } + return com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .getDefaultInstance(); + } + } + /** + * + * + *
+     * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+     * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + */ + public Builder setAutomlActionRecognitionConfig( + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig value) { + if (automlActionRecognitionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + streamingConfig_ = value; + onChanged(); + } else { + automlActionRecognitionConfigBuilder_.setMessage(value); + } + streamingConfigCase_ = 23; + return this; + } + /** + * + * + *
+     * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+     * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + */ + public Builder setAutomlActionRecognitionConfig( + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig.Builder + builderForValue) { + if (automlActionRecognitionConfigBuilder_ == null) { + streamingConfig_ = builderForValue.build(); + onChanged(); + } else { + automlActionRecognitionConfigBuilder_.setMessage(builderForValue.build()); + } + streamingConfigCase_ = 23; + return this; + } + /** + * + * + *
+     * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+     * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + */ + public Builder mergeAutomlActionRecognitionConfig( + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig value) { + if (automlActionRecognitionConfigBuilder_ == null) { + if (streamingConfigCase_ == 23 + && streamingConfig_ + != com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfig.getDefaultInstance()) { + streamingConfig_ = + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .newBuilder( + (com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfig) + streamingConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + streamingConfig_ = value; + } + onChanged(); + } else { + if (streamingConfigCase_ == 23) { + automlActionRecognitionConfigBuilder_.mergeFrom(value); + } + automlActionRecognitionConfigBuilder_.setMessage(value); + } + streamingConfigCase_ = 23; + return this; + } + /** + * + * + *
+     * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+     * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + */ + public Builder clearAutomlActionRecognitionConfig() { + if (automlActionRecognitionConfigBuilder_ == null) { + if (streamingConfigCase_ == 23) { + streamingConfigCase_ = 0; + streamingConfig_ = null; + onChanged(); + } + } else { + if (streamingConfigCase_ == 23) { + streamingConfigCase_ = 0; + streamingConfig_ = null; + } + automlActionRecognitionConfigBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+     * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + */ + public com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .Builder + getAutomlActionRecognitionConfigBuilder() { + return getAutomlActionRecognitionConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+     * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + */ + public com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfigOrBuilder + getAutomlActionRecognitionConfigOrBuilder() { + if ((streamingConfigCase_ == 23) && (automlActionRecognitionConfigBuilder_ != null)) { + return automlActionRecognitionConfigBuilder_.getMessageOrBuilder(); + } else { + if (streamingConfigCase_ == 23) { + return (com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfig) + streamingConfig_; + } + return com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .getDefaultInstance(); + } + } + /** + * + * + *
+     * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+     * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig, + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .Builder, + com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfigOrBuilder> + getAutomlActionRecognitionConfigFieldBuilder() { + if (automlActionRecognitionConfigBuilder_ == null) { + if (!(streamingConfigCase_ == 23)) { + streamingConfig_ = + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .getDefaultInstance(); + } + automlActionRecognitionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig, + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + .Builder, + com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfigOrBuilder>( + (com.google.cloud.videointelligence.v1p3beta1 + .StreamingAutomlActionRecognitionConfig) + streamingConfig_, + getParentForChildren(), + isClean()); + streamingConfig_ = null; + } + streamingConfigCase_ = 23; + onChanged(); + ; + return automlActionRecognitionConfigBuilder_; + } + private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig, com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig @@ -2954,6 +3242,96 @@ public Builder clearAutomlObjectTrackingConfig() { return automlObjectTrackingConfigBuilder_; } + private int feature_ = 0; + /** + * + * + *
+     * Requested annotation feature.
+     * 
+ * + * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; + * + * @return The enum numeric value on the wire for feature. + */ + public int getFeatureValue() { + return feature_; + } + /** + * + * + *
+     * Requested annotation feature.
+     * 
+ * + * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; + * + * @param value The enum numeric value on the wire for feature to set. + * @return This builder for chaining. + */ + public Builder setFeatureValue(int value) { + feature_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Requested annotation feature.
+     * 
+ * + * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; + * + * @return The feature. + */ + public com.google.cloud.videointelligence.v1p3beta1.StreamingFeature getFeature() { + @SuppressWarnings("deprecation") + com.google.cloud.videointelligence.v1p3beta1.StreamingFeature result = + com.google.cloud.videointelligence.v1p3beta1.StreamingFeature.valueOf(feature_); + return result == null + ? com.google.cloud.videointelligence.v1p3beta1.StreamingFeature.UNRECOGNIZED + : result; + } + /** + * + * + *
+     * Requested annotation feature.
+     * 
+ * + * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; + * + * @param value The feature to set. + * @return This builder for chaining. + */ + public Builder setFeature(com.google.cloud.videointelligence.v1p3beta1.StreamingFeature value) { + if (value == null) { + throw new NullPointerException(); + } + + feature_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * Requested annotation feature.
+     * 
+ * + * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; + * + * @return This builder for chaining. + */ + public Builder clearFeature() { + + feature_ = 0; + onChanged(); + return this; + } + private com.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig storageConfig_; private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig, diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoConfigOrBuilder.java index ba9db5082..dba06a0c2 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoConfigOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/StreamingVideoConfigOrBuilder.java @@ -23,31 +23,6 @@ public interface StreamingVideoConfigOrBuilder // @@protoc_insertion_point(interface_extends:google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig) com.google.protobuf.MessageOrBuilder { - /** - * - * - *
-   * Requested annotation feature.
-   * 
- * - * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; - * - * @return The enum numeric value on the wire for feature. - */ - int getFeatureValue(); - /** - * - * - *
-   * Requested annotation feature.
-   * 
- * - * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; - * - * @return The feature. - */ - com.google.cloud.videointelligence.v1p3beta1.StreamingFeature getFeature(); - /** * * @@ -220,6 +195,49 @@ public interface StreamingVideoConfigOrBuilder com.google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfigOrBuilder getObjectTrackingConfigOrBuilder(); + /** + * + * + *
+   * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+   * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + * + * @return Whether the automlActionRecognitionConfig field is set. + */ + boolean hasAutomlActionRecognitionConfig(); + /** + * + * + *
+   * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+   * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + * + * @return The automlActionRecognitionConfig. + */ + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig + getAutomlActionRecognitionConfig(); + /** + * + * + *
+   * Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+   * 
+ * + * + * .google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig automl_action_recognition_config = 23; + * + */ + com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfigOrBuilder + getAutomlActionRecognitionConfigOrBuilder(); + /** * * @@ -306,6 +324,31 @@ public interface StreamingVideoConfigOrBuilder com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfigOrBuilder getAutomlObjectTrackingConfigOrBuilder(); + /** + * + * + *
+   * Requested annotation feature.
+   * 
+ * + * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; + * + * @return The enum numeric value on the wire for feature. + */ + int getFeatureValue(); + /** + * + * + *
+   * Requested annotation feature.
+   * 
+ * + * .google.cloud.videointelligence.v1p3beta1.StreamingFeature feature = 1; + * + * @return The feature. + */ + com.google.cloud.videointelligence.v1p3beta1.StreamingFeature getFeature(); + /** * * diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationProgress.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationProgress.java index 967443425..4eb4c1002 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationProgress.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationProgress.java @@ -178,7 +178,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -201,7 +201,7 @@ public java.lang.String getInputUri() { * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -331,7 +331,7 @@ public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { * *
    * Specifies which feature is being tracked if the request contains more than
-   * one features.
+   * one feature.
    * 
* * .google.cloud.videointelligence.v1p3beta1.Feature feature = 5; @@ -346,7 +346,7 @@ public int getFeatureValue() { * *
    * Specifies which feature is being tracked if the request contains more than
-   * one features.
+   * one feature.
    * 
* * .google.cloud.videointelligence.v1p3beta1.Feature feature = 5; @@ -369,7 +369,7 @@ public com.google.cloud.videointelligence.v1p3beta1.Feature getFeature() { * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -384,7 +384,7 @@ public boolean hasSegment() { * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -401,7 +401,7 @@ public com.google.cloud.videointelligence.v1p3beta1.VideoSegment getSegment() { * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -859,7 +859,7 @@ public Builder mergeFrom( * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -882,7 +882,7 @@ public java.lang.String getInputUri() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -905,7 +905,7 @@ public com.google.protobuf.ByteString getInputUriBytes() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -927,7 +927,7 @@ public Builder setInputUri(java.lang.String value) { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -945,7 +945,7 @@ public Builder clearInputUri() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -1386,7 +1386,7 @@ public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { * *
      * Specifies which feature is being tracked if the request contains more than
-     * one features.
+     * one feature.
      * 
* * .google.cloud.videointelligence.v1p3beta1.Feature feature = 5; @@ -1401,7 +1401,7 @@ public int getFeatureValue() { * *
      * Specifies which feature is being tracked if the request contains more than
-     * one features.
+     * one feature.
      * 
* * .google.cloud.videointelligence.v1p3beta1.Feature feature = 5; @@ -1419,7 +1419,7 @@ public Builder setFeatureValue(int value) { * *
      * Specifies which feature is being tracked if the request contains more than
-     * one features.
+     * one feature.
      * 
* * .google.cloud.videointelligence.v1p3beta1.Feature feature = 5; @@ -1439,7 +1439,7 @@ public com.google.cloud.videointelligence.v1p3beta1.Feature getFeature() { * *
      * Specifies which feature is being tracked if the request contains more than
-     * one features.
+     * one feature.
      * 
* * .google.cloud.videointelligence.v1p3beta1.Feature feature = 5; @@ -1461,7 +1461,7 @@ public Builder setFeature(com.google.cloud.videointelligence.v1p3beta1.Feature v * *
      * Specifies which feature is being tracked if the request contains more than
-     * one features.
+     * one feature.
      * 
* * .google.cloud.videointelligence.v1p3beta1.Feature feature = 5; @@ -1486,7 +1486,7 @@ public Builder clearFeature() { * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -1501,7 +1501,7 @@ public boolean hasSegment() { * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -1522,7 +1522,7 @@ public com.google.cloud.videointelligence.v1p3beta1.VideoSegment getSegment() { * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -1545,7 +1545,7 @@ public Builder setSegment(com.google.cloud.videointelligence.v1p3beta1.VideoSegm * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -1566,7 +1566,7 @@ public Builder setSegment( * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -1593,7 +1593,7 @@ public Builder mergeSegment(com.google.cloud.videointelligence.v1p3beta1.VideoSe * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -1614,7 +1614,7 @@ public Builder clearSegment() { * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -1629,7 +1629,7 @@ public com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder getSegm * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -1649,7 +1649,7 @@ public com.google.cloud.videointelligence.v1p3beta1.VideoSegment.Builder getSegm * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationProgressOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationProgressOrBuilder.java index 6e0c307c1..a10f56620 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationProgressOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationProgressOrBuilder.java @@ -28,7 +28,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -41,7 +41,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -139,7 +139,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Specifies which feature is being tracked if the request contains more than
-   * one features.
+   * one feature.
    * 
* * .google.cloud.videointelligence.v1p3beta1.Feature feature = 5; @@ -152,7 +152,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Specifies which feature is being tracked if the request contains more than
-   * one features.
+   * one feature.
    * 
* * .google.cloud.videointelligence.v1p3beta1.Feature feature = 5; @@ -166,7 +166,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -179,7 +179,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; @@ -192,7 +192,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1p3beta1.VideoSegment segment = 6; diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationResults.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationResults.java index 00890f39b..e12c0f587 100644 --- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationResults.java +++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationResults.java @@ -412,7 +412,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -435,7 +435,7 @@ public java.lang.String getInputUri() { * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -506,7 +506,7 @@ public com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder getSeg * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -522,7 +522,7 @@ public com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder getSeg * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -539,7 +539,7 @@ public com.google.cloud.videointelligence.v1p3beta1.VideoSegmentOrBuilder getSeg * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -554,7 +554,7 @@ public int getSegmentLabelAnnotationsCount() { * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -570,7 +570,7 @@ public com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentLa * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -590,7 +590,7 @@ public com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentLa * * *
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -610,7 +610,7 @@ public com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentLa
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -631,7 +631,7 @@ public com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentLa
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -650,7 +650,7 @@ public int getSegmentPresenceLabelAnnotationsCount() {
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -670,7 +670,7 @@ public int getSegmentPresenceLabelAnnotationsCount() {
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -2794,7 +2794,7 @@ public Builder mergeFrom(
      *
      * 
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -2817,7 +2817,7 @@ public java.lang.String getInputUri() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -2840,7 +2840,7 @@ public com.google.protobuf.ByteString getInputUriBytes() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -2862,7 +2862,7 @@ public Builder setInputUri(java.lang.String value) { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -2880,7 +2880,7 @@ public Builder clearInputUri() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -3108,7 +3108,7 @@ private void ensureSegmentLabelAnnotationsIsMutable() { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3128,7 +3128,7 @@ private void ensureSegmentLabelAnnotationsIsMutable() { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3147,7 +3147,7 @@ public int getSegmentLabelAnnotationsCount() { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3167,7 +3167,7 @@ public com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentLa * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3193,7 +3193,7 @@ public Builder setSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3217,7 +3217,7 @@ public Builder setSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3243,7 +3243,7 @@ public Builder addSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3269,7 +3269,7 @@ public Builder addSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3292,7 +3292,7 @@ public Builder addSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3316,7 +3316,7 @@ public Builder addSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3340,7 +3340,7 @@ public Builder addAllSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3362,7 +3362,7 @@ public Builder clearSegmentLabelAnnotations() { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3384,7 +3384,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3400,7 +3400,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3420,7 +3420,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3441,7 +3441,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3459,7 +3459,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3478,7 +3478,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3533,7 +3533,7 @@ private void ensureSegmentPresenceLabelAnnotationsIsMutable() { * * *
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3557,7 +3557,7 @@ private void ensureSegmentPresenceLabelAnnotationsIsMutable() {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3580,7 +3580,7 @@ public int getSegmentPresenceLabelAnnotationsCount() {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3604,7 +3604,7 @@ public int getSegmentPresenceLabelAnnotationsCount() {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3634,7 +3634,7 @@ public Builder setSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3662,7 +3662,7 @@ public Builder setSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3692,7 +3692,7 @@ public Builder addSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3722,7 +3722,7 @@ public Builder addSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3749,7 +3749,7 @@ public Builder addSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3777,7 +3777,7 @@ public Builder addSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3806,7 +3806,7 @@ public Builder addAllSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3832,7 +3832,7 @@ public Builder clearSegmentPresenceLabelAnnotations() {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3858,7 +3858,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3878,7 +3878,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3902,7 +3902,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3927,7 +3927,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3949,7 +3949,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3972,7 +3972,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationResultsOrBuilder.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationResultsOrBuilder.java
index 2d86c83a4..b9018e358 100644
--- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationResultsOrBuilder.java
+++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoAnnotationResultsOrBuilder.java
@@ -28,7 +28,7 @@ public interface VideoAnnotationResultsOrBuilder
    *
    * 
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -41,7 +41,7 @@ public interface VideoAnnotationResultsOrBuilder * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -89,7 +89,7 @@ public interface VideoAnnotationResultsOrBuilder * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -103,7 +103,7 @@ public interface VideoAnnotationResultsOrBuilder * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -117,7 +117,7 @@ com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentLabelAnno * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -130,7 +130,7 @@ com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentLabelAnno * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -144,7 +144,7 @@ com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentLabelAnno * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -159,7 +159,7 @@ com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentLabelAnno * * *
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -177,7 +177,7 @@ com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentLabelAnno
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -195,7 +195,7 @@ com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentPresenceL
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -212,7 +212,7 @@ com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentPresenceL
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -230,7 +230,7 @@ com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation getSegmentPresenceL
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceProto.java b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceProto.java
index de87ca63f..8ea96695a 100644
--- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceProto.java
+++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceProto.java
@@ -208,45 +208,49 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoRequest_fieldAccessorTable;
   static final com.google.protobuf.Descriptors.Descriptor
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoResponse_descriptor;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoConfig_descriptor;
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoResponse_fieldAccessorTable;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoConfig_fieldAccessorTable;
   static final com.google.protobuf.Descriptors.Descriptor
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlClassificationConfig_descriptor;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoResponse_descriptor;
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlClassificationConfig_fieldAccessorTable;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoResponse_fieldAccessorTable;
   static final com.google.protobuf.Descriptors.Descriptor
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlObjectTrackingConfig_descriptor;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoAnnotationResults_descriptor;
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlObjectTrackingConfig_fieldAccessorTable;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoAnnotationResults_fieldAccessorTable;
   static final com.google.protobuf.Descriptors.Descriptor
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingExplicitContentDetectionConfig_descriptor;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingShotChangeDetectionConfig_descriptor;
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingExplicitContentDetectionConfig_fieldAccessorTable;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingShotChangeDetectionConfig_fieldAccessorTable;
   static final com.google.protobuf.Descriptors.Descriptor
       internal_static_google_cloud_videointelligence_v1p3beta1_StreamingLabelDetectionConfig_descriptor;
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_google_cloud_videointelligence_v1p3beta1_StreamingLabelDetectionConfig_fieldAccessorTable;
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingExplicitContentDetectionConfig_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingExplicitContentDetectionConfig_fieldAccessorTable;
   static final com.google.protobuf.Descriptors.Descriptor
       internal_static_google_cloud_videointelligence_v1p3beta1_StreamingObjectTrackingConfig_descriptor;
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_google_cloud_videointelligence_v1p3beta1_StreamingObjectTrackingConfig_fieldAccessorTable;
   static final com.google.protobuf.Descriptors.Descriptor
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingShotChangeDetectionConfig_descriptor;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlActionRecognitionConfig_descriptor;
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingShotChangeDetectionConfig_fieldAccessorTable;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlActionRecognitionConfig_fieldAccessorTable;
   static final com.google.protobuf.Descriptors.Descriptor
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingStorageConfig_descriptor;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlClassificationConfig_descriptor;
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingStorageConfig_fieldAccessorTable;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlClassificationConfig_fieldAccessorTable;
   static final com.google.protobuf.Descriptors.Descriptor
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoAnnotationResults_descriptor;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlObjectTrackingConfig_descriptor;
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoAnnotationResults_fieldAccessorTable;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlObjectTrackingConfig_fieldAccessorTable;
   static final com.google.protobuf.Descriptors.Descriptor
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoConfig_descriptor;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingStorageConfig_descriptor;
   static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoConfig_fieldAccessorTable;
+      internal_static_google_cloud_videointelligence_v1p3beta1_StreamingStorageConfig_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
     return descriptor;
@@ -467,13 +471,13 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
           + ".cloud.videointelligence.v1p3beta1.Norma"
           + "lizedBoundingBox\022.\n\013time_offset\030\002 \001(\0132\031."
           + "google.protobuf.Duration\"\254\002\n\030ObjectTrack"
-          + "ingAnnotation\022@\n\006entity\030\001 \001(\01320.google.c"
-          + "loud.videointelligence.v1p3beta1.Entity\022"
-          + "\022\n\nconfidence\030\004 \001(\002\022M\n\006frames\030\002 \003(\0132=.go"
-          + "ogle.cloud.videointelligence.v1p3beta1.O"
-          + "bjectTrackingFrame\022I\n\007segment\030\003 \001(\01326.go"
-          + "ogle.cloud.videointelligence.v1p3beta1.V"
-          + "ideoSegmentH\000\022\022\n\010track_id\030\005 \001(\003H\000B\014\n\ntra"
+          + "ingAnnotation\022I\n\007segment\030\003 \001(\01326.google."
+          + "cloud.videointelligence.v1p3beta1.VideoS"
+          + "egmentH\000\022\022\n\010track_id\030\005 \001(\003H\000\022@\n\006entity\030\001"
+          + " \001(\01320.google.cloud.videointelligence.v1"
+          + "p3beta1.Entity\022\022\n\nconfidence\030\004 \001(\002\022M\n\006fr"
+          + "ames\030\002 \003(\0132=.google.cloud.videointellige"
+          + "nce.v1p3beta1.ObjectTrackingFrameB\014\n\ntra"
           + "ck_info\"\350\001\n\031LogoRecognitionAnnotation\022@\n"
           + "\006entity\030\001 \001(\01320.google.cloud.videointell"
           + "igence.v1p3beta1.Entity\022?\n\006tracks\030\002 \003(\0132"
@@ -484,97 +488,103 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
           + "video_config\030\001 \001(\0132>.google.cloud.videoi"
           + "ntelligence.v1p3beta1.StreamingVideoConf"
           + "igH\000\022\027\n\rinput_content\030\002 \001(\014H\000B\023\n\021streami"
-          + "ng_request\"\312\001\n\036StreamingAnnotateVideoRes"
-          + "ponse\022!\n\005error\030\001 \001(\0132\022.google.rpc.Status"
-          + "\022e\n\022annotation_results\030\002 \001(\0132I.google.cl"
-          + "oud.videointelligence.v1p3beta1.Streamin"
-          + "gVideoAnnotationResults\022\036\n\026annotation_re"
-          + "sults_uri\030\003 \001(\t\"9\n#StreamingAutomlClassi"
-          + "ficationConfig\022\022\n\nmodel_name\030\001 \001(\t\"9\n#St"
-          + "reamingAutomlObjectTrackingConfig\022\022\n\nmod"
-          + "el_name\030\001 \001(\t\")\n\'StreamingExplicitConten"
-          + "tDetectionConfig\":\n\035StreamingLabelDetect"
-          + "ionConfig\022\031\n\021stationary_camera\030\001 \001(\010\"\037\n\035"
-          + "StreamingObjectTrackingConfig\"$\n\"Streami"
-          + "ngShotChangeDetectionConfig\"o\n\026Streaming"
-          + "StorageConfig\022(\n enable_storage_annotati"
-          + "on_result\030\001 \001(\010\022+\n#annotation_result_sto"
-          + "rage_directory\030\003 \001(\t\"\213\003\n\037StreamingVideoA"
-          + "nnotationResults\022P\n\020shot_annotations\030\001 \003"
-          + "(\01326.google.cloud.videointelligence.v1p3"
-          + "beta1.VideoSegment\022T\n\021label_annotations\030"
-          + "\002 \003(\01329.google.cloud.videointelligence.v"
-          + "1p3beta1.LabelAnnotation\022`\n\023explicit_ann"
-          + "otation\030\003 \001(\0132C.google.cloud.videointell"
-          + "igence.v1p3beta1.ExplicitContentAnnotati"
-          + "on\022^\n\022object_annotations\030\004 \003(\0132B.google."
-          + "cloud.videointelligence.v1p3beta1.Object"
-          + "TrackingAnnotation\"\214\007\n\024StreamingVideoCon"
-          + "fig\022K\n\007feature\030\001 \001(\0162:.google.cloud.vide"
-          + "ointelligence.v1p3beta1.StreamingFeature"
-          + "\022t\n\034shot_change_detection_config\030\002 \001(\0132L"
-          + ".google.cloud.videointelligence.v1p3beta"
-          + "1.StreamingShotChangeDetectionConfigH\000\022i"
-          + "\n\026label_detection_config\030\003 \001(\0132G.google."
-          + "cloud.videointelligence.v1p3beta1.Stream"
-          + "ingLabelDetectionConfigH\000\022~\n!explicit_co"
-          + "ntent_detection_config\030\004 \001(\0132Q.google.cl"
-          + "oud.videointelligence.v1p3beta1.Streamin"
-          + "gExplicitContentDetectionConfigH\000\022i\n\026obj"
-          + "ect_tracking_config\030\005 \001(\0132G.google.cloud"
-          + ".videointelligence.v1p3beta1.StreamingOb"
-          + "jectTrackingConfigH\000\022u\n\034automl_classific"
-          + "ation_config\030\025 \001(\0132M.google.cloud.videoi"
-          + "ntelligence.v1p3beta1.StreamingAutomlCla"
-          + "ssificationConfigH\000\022v\n\035automl_object_tra"
-          + "cking_config\030\026 \001(\0132M.google.cloud.videoi"
-          + "ntelligence.v1p3beta1.StreamingAutomlObj"
-          + "ectTrackingConfigH\000\022X\n\016storage_config\030\036 "
-          + "\001(\0132@.google.cloud.videointelligence.v1p"
-          + "3beta1.StreamingStorageConfigB\022\n\020streami"
-          + "ng_config*\220\002\n\007Feature\022\027\n\023FEATURE_UNSPECI"
-          + "FIED\020\000\022\023\n\017LABEL_DETECTION\020\001\022\031\n\025SHOT_CHAN"
-          + "GE_DETECTION\020\002\022\036\n\032EXPLICIT_CONTENT_DETEC"
-          + "TION\020\003\022\022\n\016FACE_DETECTION\020\004\022\030\n\024SPEECH_TRA"
-          + "NSCRIPTION\020\006\022\022\n\016TEXT_DETECTION\020\007\022\023\n\017OBJE"
-          + "CT_TRACKING\020\t\022\024\n\020LOGO_RECOGNITION\020\014\022\031\n\025C"
-          + "ELEBRITY_RECOGNITION\020\r\022\024\n\020PERSON_DETECTI"
-          + "ON\020\016*r\n\022LabelDetectionMode\022$\n LABEL_DETE"
-          + "CTION_MODE_UNSPECIFIED\020\000\022\r\n\tSHOT_MODE\020\001\022"
-          + "\016\n\nFRAME_MODE\020\002\022\027\n\023SHOT_AND_FRAME_MODE\020\003"
-          + "*t\n\nLikelihood\022\032\n\026LIKELIHOOD_UNSPECIFIED"
-          + "\020\000\022\021\n\rVERY_UNLIKELY\020\001\022\014\n\010UNLIKELY\020\002\022\014\n\010P"
-          + "OSSIBLE\020\003\022\n\n\006LIKELY\020\004\022\017\n\013VERY_LIKELY\020\005*\215"
-          + "\002\n\020StreamingFeature\022!\n\035STREAMING_FEATURE"
-          + "_UNSPECIFIED\020\000\022\035\n\031STREAMING_LABEL_DETECT"
-          + "ION\020\001\022#\n\037STREAMING_SHOT_CHANGE_DETECTION"
-          + "\020\002\022(\n$STREAMING_EXPLICIT_CONTENT_DETECTI"
-          + "ON\020\003\022\035\n\031STREAMING_OBJECT_TRACKING\020\004\022#\n\037S"
-          + "TREAMING_AUTOML_CLASSIFICATION\020\025\022$\n STRE"
-          + "AMING_AUTOML_OBJECT_TRACKING\020\0262\316\002\n\030Video"
-          + "IntelligenceService\022\333\001\n\rAnnotateVideo\022>."
+          + "ng_request\"\212\010\n\024StreamingVideoConfig\022t\n\034s"
+          + "hot_change_detection_config\030\002 \001(\0132L.goog"
+          + "le.cloud.videointelligence.v1p3beta1.Str"
+          + "eamingShotChangeDetectionConfigH\000\022i\n\026lab"
+          + "el_detection_config\030\003 \001(\0132G.google.cloud"
+          + ".videointelligence.v1p3beta1.StreamingLa"
+          + "belDetectionConfigH\000\022~\n!explicit_content"
+          + "_detection_config\030\004 \001(\0132Q.google.cloud.v"
+          + "ideointelligence.v1p3beta1.StreamingExpl"
+          + "icitContentDetectionConfigH\000\022i\n\026object_t"
+          + "racking_config\030\005 \001(\0132G.google.cloud.vide"
+          + "ointelligence.v1p3beta1.StreamingObjectT"
+          + "rackingConfigH\000\022|\n automl_action_recogni"
+          + "tion_config\030\027 \001(\0132P.google.cloud.videoin"
+          + "telligence.v1p3beta1.StreamingAutomlActi"
+          + "onRecognitionConfigH\000\022u\n\034automl_classifi"
+          + "cation_config\030\025 \001(\0132M.google.cloud.video"
+          + "intelligence.v1p3beta1.StreamingAutomlCl"
+          + "assificationConfigH\000\022v\n\035automl_object_tr"
+          + "acking_config\030\026 \001(\0132M.google.cloud.video"
+          + "intelligence.v1p3beta1.StreamingAutomlOb"
+          + "jectTrackingConfigH\000\022K\n\007feature\030\001 \001(\0162:."
           + "google.cloud.videointelligence.v1p3beta1"
-          + ".AnnotateVideoRequest\032\035.google.longrunni"
-          + "ng.Operation\"k\202\323\344\223\002\037\"\032/v1p3beta1/videos:"
-          + "annotate:\001*\332A\022input_uri,features\312A.\n\025Ann"
-          + "otateVideoResponse\022\025AnnotateVideoProgres"
-          + "s\032T\312A videointelligence.googleapis.com\322A"
-          + ".https://www.googleapis.com/auth/cloud-p"
-          + "latform2\255\002\n!StreamingVideoIntelligenceSe"
-          + "rvice\022\261\001\n\026StreamingAnnotateVideo\022G.googl"
-          + "e.cloud.videointelligence.v1p3beta1.Stre"
-          + "amingAnnotateVideoRequest\032H.google.cloud"
-          + ".videointelligence.v1p3beta1.StreamingAn"
-          + "notateVideoResponse\"\000(\0010\001\032T\312A videointel"
-          + "ligence.googleapis.com\322A.https://www.goo"
-          + "gleapis.com/auth/cloud-platformB\200\002\n,com."
+          + ".StreamingFeature\022X\n\016storage_config\030\036 \001("
+          + "\0132@.google.cloud.videointelligence.v1p3b"
+          + "eta1.StreamingStorageConfigB\022\n\020streaming"
+          + "_config\"\312\001\n\036StreamingAnnotateVideoRespon"
+          + "se\022!\n\005error\030\001 \001(\0132\022.google.rpc.Status\022e\n"
+          + "\022annotation_results\030\002 \001(\0132I.google.cloud"
+          + ".videointelligence.v1p3beta1.StreamingVi"
+          + "deoAnnotationResults\022\036\n\026annotation_resul"
+          + "ts_uri\030\003 \001(\t\"\213\003\n\037StreamingVideoAnnotatio"
+          + "nResults\022P\n\020shot_annotations\030\001 \003(\01326.goo"
+          + "gle.cloud.videointelligence.v1p3beta1.Vi"
+          + "deoSegment\022T\n\021label_annotations\030\002 \003(\01329."
           + "google.cloud.videointelligence.v1p3beta1"
-          + "B\035VideoIntelligenceServiceProtoP\001ZYgoogl"
-          + "e.golang.org/genproto/googleapis/cloud/v"
-          + "ideointelligence/v1p3beta1;videointellig"
-          + "ence\252\002(Google.Cloud.VideoIntelligence.V1"
-          + "P3Beta1\312\002(Google\\Cloud\\VideoIntelligence"
-          + "\\V1p3beta1b\006proto3"
+          + ".LabelAnnotation\022`\n\023explicit_annotation\030"
+          + "\003 \001(\0132C.google.cloud.videointelligence.v"
+          + "1p3beta1.ExplicitContentAnnotation\022^\n\022ob"
+          + "ject_annotations\030\004 \003(\0132B.google.cloud.vi"
+          + "deointelligence.v1p3beta1.ObjectTracking"
+          + "Annotation\"$\n\"StreamingShotChangeDetecti"
+          + "onConfig\":\n\035StreamingLabelDetectionConfi"
+          + "g\022\031\n\021stationary_camera\030\001 \001(\010\")\n\'Streamin"
+          + "gExplicitContentDetectionConfig\"\037\n\035Strea"
+          + "mingObjectTrackingConfig\"<\n&StreamingAut"
+          + "omlActionRecognitionConfig\022\022\n\nmodel_name"
+          + "\030\001 \001(\t\"9\n#StreamingAutomlClassificationC"
+          + "onfig\022\022\n\nmodel_name\030\001 \001(\t\"9\n#StreamingAu"
+          + "tomlObjectTrackingConfig\022\022\n\nmodel_name\030\001"
+          + " \001(\t\"o\n\026StreamingStorageConfig\022(\n enable"
+          + "_storage_annotation_result\030\001 \001(\010\022+\n#anno"
+          + "tation_result_storage_directory\030\003 \001(\t*r\n"
+          + "\022LabelDetectionMode\022$\n LABEL_DETECTION_M"
+          + "ODE_UNSPECIFIED\020\000\022\r\n\tSHOT_MODE\020\001\022\016\n\nFRAM"
+          + "E_MODE\020\002\022\027\n\023SHOT_AND_FRAME_MODE\020\003*t\n\nLik"
+          + "elihood\022\032\n\026LIKELIHOOD_UNSPECIFIED\020\000\022\021\n\rV"
+          + "ERY_UNLIKELY\020\001\022\014\n\010UNLIKELY\020\002\022\014\n\010POSSIBLE"
+          + "\020\003\022\n\n\006LIKELY\020\004\022\017\n\013VERY_LIKELY\020\005*\266\002\n\020Stre"
+          + "amingFeature\022!\n\035STREAMING_FEATURE_UNSPEC"
+          + "IFIED\020\000\022\035\n\031STREAMING_LABEL_DETECTION\020\001\022#"
+          + "\n\037STREAMING_SHOT_CHANGE_DETECTION\020\002\022(\n$S"
+          + "TREAMING_EXPLICIT_CONTENT_DETECTION\020\003\022\035\n"
+          + "\031STREAMING_OBJECT_TRACKING\020\004\022\'\n#STREAMIN"
+          + "G_AUTOML_ACTION_RECOGNITION\020\027\022#\n\037STREAMI"
+          + "NG_AUTOML_CLASSIFICATION\020\025\022$\n STREAMING_"
+          + "AUTOML_OBJECT_TRACKING\020\026*\220\002\n\007Feature\022\027\n\023"
+          + "FEATURE_UNSPECIFIED\020\000\022\023\n\017LABEL_DETECTION"
+          + "\020\001\022\031\n\025SHOT_CHANGE_DETECTION\020\002\022\036\n\032EXPLICI"
+          + "T_CONTENT_DETECTION\020\003\022\022\n\016FACE_DETECTION\020"
+          + "\004\022\030\n\024SPEECH_TRANSCRIPTION\020\006\022\022\n\016TEXT_DETE"
+          + "CTION\020\007\022\023\n\017OBJECT_TRACKING\020\t\022\024\n\020LOGO_REC"
+          + "OGNITION\020\014\022\031\n\025CELEBRITY_RECOGNITION\020\r\022\024\n"
+          + "\020PERSON_DETECTION\020\0162\316\002\n\030VideoIntelligenc"
+          + "eService\022\333\001\n\rAnnotateVideo\022>.google.clou"
+          + "d.videointelligence.v1p3beta1.AnnotateVi"
+          + "deoRequest\032\035.google.longrunning.Operatio"
+          + "n\"k\202\323\344\223\002\037\"\032/v1p3beta1/videos:annotate:\001*"
+          + "\332A\022input_uri,features\312A.\n\025AnnotateVideoR"
+          + "esponse\022\025AnnotateVideoProgress\032T\312A video"
+          + "intelligence.googleapis.com\322A.https://ww"
+          + "w.googleapis.com/auth/cloud-platform2\255\002\n"
+          + "!StreamingVideoIntelligenceService\022\261\001\n\026S"
+          + "treamingAnnotateVideo\022G.google.cloud.vid"
+          + "eointelligence.v1p3beta1.StreamingAnnota"
+          + "teVideoRequest\032H.google.cloud.videointel"
+          + "ligence.v1p3beta1.StreamingAnnotateVideo"
+          + "Response\"\000(\0010\001\032T\312A videointelligence.goo"
+          + "gleapis.com\322A.https://www.googleapis.com"
+          + "/auth/cloud-platformB\200\002\n,com.google.clou"
+          + "d.videointelligence.v1p3beta1B\035VideoInte"
+          + "lligenceServiceProtoP\001ZYgoogle.golang.or"
+          + "g/genproto/googleapis/cloud/videointelli"
+          + "gence/v1p3beta1;videointelligence\252\002(Goog"
+          + "le.Cloud.VideoIntelligence.V1P3Beta1\312\002(G"
+          + "oogle\\Cloud\\VideoIntelligence\\V1p3beta1b"
+          + "\006proto3"
     };
     descriptor =
         com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -968,7 +978,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
         new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
             internal_static_google_cloud_videointelligence_v1p3beta1_ObjectTrackingAnnotation_descriptor,
             new java.lang.String[] {
-              "Entity", "Confidence", "Frames", "Segment", "TrackId", "TrackInfo",
+              "Segment", "TrackId", "Entity", "Confidence", "Frames", "TrackInfo",
             });
     internal_static_google_cloud_videointelligence_v1p3beta1_LogoRecognitionAnnotation_descriptor =
         getDescriptor().getMessageTypes().get(42);
@@ -986,35 +996,44 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
             new java.lang.String[] {
               "VideoConfig", "InputContent", "StreamingRequest",
             });
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoResponse_descriptor =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoConfig_descriptor =
         getDescriptor().getMessageTypes().get(44);
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoResponse_fieldAccessorTable =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoConfig_fieldAccessorTable =
         new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoResponse_descriptor,
+            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoConfig_descriptor,
             new java.lang.String[] {
-              "Error", "AnnotationResults", "AnnotationResultsUri",
+              "ShotChangeDetectionConfig",
+              "LabelDetectionConfig",
+              "ExplicitContentDetectionConfig",
+              "ObjectTrackingConfig",
+              "AutomlActionRecognitionConfig",
+              "AutomlClassificationConfig",
+              "AutomlObjectTrackingConfig",
+              "Feature",
+              "StorageConfig",
+              "StreamingConfig",
             });
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlClassificationConfig_descriptor =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoResponse_descriptor =
         getDescriptor().getMessageTypes().get(45);
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlClassificationConfig_fieldAccessorTable =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoResponse_fieldAccessorTable =
         new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlClassificationConfig_descriptor,
+            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAnnotateVideoResponse_descriptor,
             new java.lang.String[] {
-              "ModelName",
+              "Error", "AnnotationResults", "AnnotationResultsUri",
             });
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlObjectTrackingConfig_descriptor =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoAnnotationResults_descriptor =
         getDescriptor().getMessageTypes().get(46);
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlObjectTrackingConfig_fieldAccessorTable =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoAnnotationResults_fieldAccessorTable =
         new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlObjectTrackingConfig_descriptor,
+            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoAnnotationResults_descriptor,
             new java.lang.String[] {
-              "ModelName",
+              "ShotAnnotations", "LabelAnnotations", "ExplicitAnnotation", "ObjectAnnotations",
             });
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingExplicitContentDetectionConfig_descriptor =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingShotChangeDetectionConfig_descriptor =
         getDescriptor().getMessageTypes().get(47);
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingExplicitContentDetectionConfig_fieldAccessorTable =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingShotChangeDetectionConfig_fieldAccessorTable =
         new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingExplicitContentDetectionConfig_descriptor,
+            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingShotChangeDetectionConfig_descriptor,
             new java.lang.String[] {});
     internal_static_google_cloud_videointelligence_v1p3beta1_StreamingLabelDetectionConfig_descriptor =
         getDescriptor().getMessageTypes().get(48);
@@ -1024,49 +1043,49 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
             new java.lang.String[] {
               "StationaryCamera",
             });
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingObjectTrackingConfig_descriptor =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingExplicitContentDetectionConfig_descriptor =
         getDescriptor().getMessageTypes().get(49);
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingObjectTrackingConfig_fieldAccessorTable =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingExplicitContentDetectionConfig_fieldAccessorTable =
         new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingObjectTrackingConfig_descriptor,
+            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingExplicitContentDetectionConfig_descriptor,
             new java.lang.String[] {});
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingShotChangeDetectionConfig_descriptor =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingObjectTrackingConfig_descriptor =
         getDescriptor().getMessageTypes().get(50);
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingShotChangeDetectionConfig_fieldAccessorTable =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingObjectTrackingConfig_fieldAccessorTable =
         new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingShotChangeDetectionConfig_descriptor,
+            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingObjectTrackingConfig_descriptor,
             new java.lang.String[] {});
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingStorageConfig_descriptor =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlActionRecognitionConfig_descriptor =
         getDescriptor().getMessageTypes().get(51);
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingStorageConfig_fieldAccessorTable =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlActionRecognitionConfig_fieldAccessorTable =
         new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingStorageConfig_descriptor,
+            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlActionRecognitionConfig_descriptor,
             new java.lang.String[] {
-              "EnableStorageAnnotationResult", "AnnotationResultStorageDirectory",
+              "ModelName",
             });
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoAnnotationResults_descriptor =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlClassificationConfig_descriptor =
         getDescriptor().getMessageTypes().get(52);
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoAnnotationResults_fieldAccessorTable =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlClassificationConfig_fieldAccessorTable =
         new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoAnnotationResults_descriptor,
+            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlClassificationConfig_descriptor,
             new java.lang.String[] {
-              "ShotAnnotations", "LabelAnnotations", "ExplicitAnnotation", "ObjectAnnotations",
+              "ModelName",
             });
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoConfig_descriptor =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlObjectTrackingConfig_descriptor =
         getDescriptor().getMessageTypes().get(53);
-    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoConfig_fieldAccessorTable =
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlObjectTrackingConfig_fieldAccessorTable =
         new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingVideoConfig_descriptor,
+            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingAutomlObjectTrackingConfig_descriptor,
             new java.lang.String[] {
-              "Feature",
-              "ShotChangeDetectionConfig",
-              "LabelDetectionConfig",
-              "ExplicitContentDetectionConfig",
-              "ObjectTrackingConfig",
-              "AutomlClassificationConfig",
-              "AutomlObjectTrackingConfig",
-              "StorageConfig",
-              "StreamingConfig",
+              "ModelName",
+            });
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingStorageConfig_descriptor =
+        getDescriptor().getMessageTypes().get(54);
+    internal_static_google_cloud_videointelligence_v1p3beta1_StreamingStorageConfig_fieldAccessorTable =
+        new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+            internal_static_google_cloud_videointelligence_v1p3beta1_StreamingStorageConfig_descriptor,
+            new java.lang.String[] {
+              "EnableStorageAnnotationResult", "AnnotationResultStorageDirectory",
             });
     com.google.protobuf.ExtensionRegistry registry =
         com.google.protobuf.ExtensionRegistry.newInstance();
diff --git a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/proto/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/proto/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto
index 3d418e2ff..6284e0db3 100644
--- a/proto-google-cloud-video-intelligence-v1p3beta1/src/main/proto/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto
+++ b/proto-google-cloud-video-intelligence-v1p3beta1/src/main/proto/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
 
@@ -32,7 +31,7 @@ option java_outer_classname = "VideoIntelligenceServiceProto";
 option java_package = "com.google.cloud.videointelligence.v1p3beta1";
 option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p3beta1";
 
-// Service that implements Google Cloud Video Intelligence API.
+// Service that implements the Video Intelligence API.
 service VideoIntelligenceService {
   option (google.api.default_host) = "videointelligence.googleapis.com";
   option (google.api.oauth_scopes) =
@@ -56,7 +55,7 @@ service VideoIntelligenceService {
   }
 }
 
-// Service that implements streaming Google Cloud Video Intelligence API.
+// Service that implements streaming Video Intelligence API.
 service StreamingVideoIntelligenceService {
   option (google.api.default_host) = "videointelligence.googleapis.com";
   option (google.api.oauth_scopes) =
@@ -72,20 +71,21 @@ service StreamingVideoIntelligenceService {
 // Video annotation request.
 message AnnotateVideoRequest {
   // Input video location. Currently, only
-  // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-  // supported, which must be specified in the following format:
+  // [Cloud Storage](https://cloud.google.com/storage/) URIs are
+  // supported. URIs must be specified in the following format:
   // `gs://bucket-id/object-id` (other URI formats return
   // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-  // more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video
-  // URI may include wildcards in `object-id`, and thus identify multiple
-  // videos. Supported wildcards: '*' to match 0 or more characters;
+  // more information, see [Request
+  // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+  // multiple videos, a video URI may include wildcards in the `object-id`.
+  // Supported wildcards: '*' to match 0 or more characters;
   // '?' to match 1 character. If unset, the input video should be embedded
-  // in the request as `input_content`. If set, `input_content` should be unset.
+  // in the request as `input_content`. If set, `input_content` must be unset.
   string input_uri = 1;
 
   // The video data bytes.
-  // If unset, the input video(s) should be specified via `input_uri`.
-  // If set, `input_uri` should be unset.
+  // If unset, the input video(s) should be specified via the `input_uri`.
+  // If set, `input_uri` must be unset.
   bytes input_content = 6;
 
   // Required. Requested video annotation features.
@@ -95,16 +95,18 @@ message AnnotateVideoRequest {
   VideoContext video_context = 3;
 
   // Optional. Location where the output (in JSON format) should be stored.
-  // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-  // URIs are supported, which must be specified in the following format:
+  // Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+  // URIs are supported. These must be specified in the following format:
   // `gs://bucket-id/object-id` (other URI formats return
   // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-  // more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+  // more information, see [Request
+  // URIs](https://cloud.google.com/storage/docs/request-endpoints).
   string output_uri = 4 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. Cloud region where annotation should take place. Supported cloud
-  // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-  // is specified, a region will be determined based on video file location.
+  // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+  // region is specified, the region will be determined based on video file
+  // location.
   string location_id = 5 [(google.api.field_behavior) = OPTIONAL];
 }
 
@@ -140,6 +142,42 @@ message VideoContext {
   ObjectTrackingConfig object_tracking_config = 13;
 }
 
+// Label detection mode.
+enum LabelDetectionMode {
+  // Unspecified.
+  LABEL_DETECTION_MODE_UNSPECIFIED = 0;
+
+  // Detect shot-level labels.
+  SHOT_MODE = 1;
+
+  // Detect frame-level labels.
+  FRAME_MODE = 2;
+
+  // Detect both shot-level and frame-level labels.
+  SHOT_AND_FRAME_MODE = 3;
+}
+
+// Bucketized representation of likelihood.
+enum Likelihood {
+  // Unspecified likelihood.
+  LIKELIHOOD_UNSPECIFIED = 0;
+
+  // Very unlikely.
+  VERY_UNLIKELY = 1;
+
+  // Unlikely.
+  UNLIKELY = 2;
+
+  // Possible.
+  POSSIBLE = 3;
+
+  // Likely.
+  LIKELY = 4;
+
+  // Very likely.
+  VERY_LIKELY = 5;
+}
+
 // Config for LABEL_DETECTION.
 message LabelDetectionConfig {
   // What labels should be detected with LABEL_DETECTION, in addition to
@@ -147,9 +185,9 @@ message LabelDetectionConfig {
   // If unspecified, defaults to `SHOT_MODE`.
   LabelDetectionMode label_detection_mode = 1;
 
-  // Whether the video has been shot from a stationary (i.e. non-moving) camera.
-  // When set to true, might improve detection accuracy for moving objects.
-  // Should be used with `SHOT_AND_FRAME_MODE` enabled.
+  // Whether the video has been shot from a stationary (i.e., non-moving)
+  // camera. When set to true, might improve detection accuracy for moving
+  // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
   bool stationary_camera = 2;
 
   // Model to use for label detection.
@@ -161,19 +199,82 @@ message LabelDetectionConfig {
   // frame-level detection. If not set, it is set to 0.4 by default. The valid
   // range for this threshold is [0.1, 0.9]. Any value set outside of this
   // range will be clipped.
-  // Note: for best results please follow the default threshold. We will update
+  // Note: For best results, follow the default threshold. We will update
   // the default threshold everytime when we release a new model.
   float frame_confidence_threshold = 4;
 
   // The confidence threshold we perform filtering on the labels from
-  // video-level and shot-level detections. If not set, it is set to 0.3 by
+  // video-level and shot-level detections. If not set, it's set to 0.3 by
   // default. The valid range for this threshold is [0.1, 0.9]. Any value set
   // outside of this range will be clipped.
-  // Note: for best results please follow the default threshold. We will update
+  // Note: For best results, follow the default threshold. We will update
   // the default threshold everytime when we release a new model.
   float video_confidence_threshold = 5;
 }
 
+// Streaming video annotation feature.
+enum StreamingFeature {
+  // Unspecified.
+  STREAMING_FEATURE_UNSPECIFIED = 0;
+
+  // Label detection. Detect objects, such as dog or flower.
+  STREAMING_LABEL_DETECTION = 1;
+
+  // Shot change detection.
+  STREAMING_SHOT_CHANGE_DETECTION = 2;
+
+  // Explicit content detection.
+  STREAMING_EXPLICIT_CONTENT_DETECTION = 3;
+
+  // Object detection and tracking.
+  STREAMING_OBJECT_TRACKING = 4;
+
+  // Action recognition based on AutoML model.
+  STREAMING_AUTOML_ACTION_RECOGNITION = 23;
+
+  // Video classification based on AutoML model.
+  STREAMING_AUTOML_CLASSIFICATION = 21;
+
+  // Object detection and tracking based on AutoML model.
+  STREAMING_AUTOML_OBJECT_TRACKING = 22;
+}
+
+// Video annotation feature.
+enum Feature {
+  // Unspecified.
+  FEATURE_UNSPECIFIED = 0;
+
+  // Label detection. Detect objects, such as dog or flower.
+  LABEL_DETECTION = 1;
+
+  // Shot change detection.
+  SHOT_CHANGE_DETECTION = 2;
+
+  // Explicit content detection.
+  EXPLICIT_CONTENT_DETECTION = 3;
+
+  // Human face detection.
+  FACE_DETECTION = 4;
+
+  // Speech transcription.
+  SPEECH_TRANSCRIPTION = 6;
+
+  // OCR text detection and tracking.
+  TEXT_DETECTION = 7;
+
+  // Object detection and tracking.
+  OBJECT_TRACKING = 9;
+
+  // Logo detection, tracking, and recognition.
+  LOGO_RECOGNITION = 12;
+
+  // Celebrity recognition.
+  CELEBRITY_RECOGNITION = 13;
+
+  // Person detection.
+  PERSON_DETECTION = 14;
+}
+
 // Config for SHOT_CHANGE_DETECTION.
 message ShotChangeDetectionConfig {
   // Model to use for shot change detection.
@@ -205,28 +306,28 @@ message FaceDetectionConfig {
   // "builtin/latest".
   string model = 1;
 
-  // Whether bounding boxes be included in the face annotation output.
+  // Whether bounding boxes are included in the face annotation output.
   bool include_bounding_boxes = 2;
 
   // Whether to enable face attributes detection, such as glasses, dark_glasses,
-  // mouth_open etc. Ignored if 'include_bounding_boxes' is false.
+  // mouth_open, etc. Ignored if 'include_bounding_boxes' is set to false.
   bool include_attributes = 5;
 }
 
 // Config for PERSON_DETECTION.
 message PersonDetectionConfig {
-  // Whether bounding boxes be included in the person detection annotation
+  // Whether bounding boxes are included in the person detection annotation
   // output.
   bool include_bounding_boxes = 1;
 
   // Whether to enable pose landmarks detection. Ignored if
-  // 'include_bounding_boxes' is false.
+  // 'include_bounding_boxes' is set to false.
   bool include_pose_landmarks = 2;
 
   // Whether to enable person attributes detection, such as cloth color (black,
-  // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair
-  // color (black, blonde, etc), hair length (long, short, bald), etc.
-  // Ignored if 'include_bounding_boxes' is false.
+  // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+  // etc.
+  // Ignored if 'include_bounding_boxes' is set to false.
   bool include_attributes = 3;
 }
 
@@ -282,7 +383,7 @@ message Entity {
   // API](https://developers.google.com/knowledge-graph/).
   string entity_id = 1;
 
-  // Textual description, e.g. `Fixed-gear bicycle`.
+  // Textual description, e.g., `Fixed-gear bicycle`.
   string description = 2;
 
   // Language code for `description` in BCP-47 format.
@@ -295,9 +396,9 @@ message LabelAnnotation {
   Entity entity = 1;
 
   // Common categories for the detected entity.
-  // E.g. when the label is `Terrier` the category is likely `dog`. And in some
-  // cases there might be more than one categories e.g. `Terrier` could also be
-  // a `pet`.
+  // For example, when the label is `Terrier`, the category is likely `dog`. And
+  // in some cases there might be more than one categories e.g., `Terrier` could
+  // also be a `pet`.
   repeated Entity category_entities = 2;
 
   // All video segments where a label was detected.
@@ -380,7 +481,7 @@ message Track {
 
 // A generic detected attribute represented by name in string format.
 message DetectedAttribute {
-  // The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+  // The name of the attribute, for example, glasses, dark_glasses, mouth_open.
   // A full list of supported type names will be provided in the document.
   string name = 1;
 
@@ -437,7 +538,7 @@ message CelebrityRecognitionAnnotation {
 // A generic detected landmark represented by name in string format and a 2D
 // location.
 message DetectedLandmark {
-  // The name of this landmark, i.e. left_hand, right_shoulder.
+  // The name of this landmark, for example, left_hand, right_shoulder.
   string name = 1;
 
   // The 2D point of the detected landmark using the normalized image
@@ -459,24 +560,24 @@ message FaceDetectionAnnotation {
 
 // Person detection annotation per video.
 message PersonDetectionAnnotation {
-  // The trackes that a person is detected.
+  // The detected tracks of a person.
   repeated Track tracks = 1;
 }
 
 // Annotation results for a single video.
 message VideoAnnotationResults {
   // Video file location in
-  // [Google Cloud Storage](https://cloud.google.com/storage/).
+  // [Cloud Storage](https://cloud.google.com/storage/).
   string input_uri = 1;
 
   // Video segment on which the annotation is run.
   VideoSegment segment = 10;
 
-  // Topical label annotations on video level or user specified segment level.
+  // Topical label annotations on video level or user-specified segment level.
   // There is exactly one element for each unique label.
   repeated LabelAnnotation segment_label_annotations = 2;
 
-  // Presence label annotations on video level or user specified segment level.
+  // Presence label annotations on video level or user-specified segment level.
   // There is exactly one element for each unique label. Compared to the
   // existing topical `segment_label_annotations`, this field presents more
   // fine-grained, segment-level labels detected in video content and is made
@@ -544,7 +645,7 @@ message AnnotateVideoResponse {
 // Annotation progress for a single video.
 message VideoAnnotationProgress {
   // Video file location in
-  // [Google Cloud Storage](https://cloud.google.com/storage/).
+  // [Cloud Storage](https://cloud.google.com/storage/).
   string input_uri = 1;
 
   // Approximate percentage processed thus far. Guaranteed to be
@@ -558,11 +659,11 @@ message VideoAnnotationProgress {
   google.protobuf.Timestamp update_time = 4;
 
   // Specifies which feature is being tracked if the request contains more than
-  // one features.
+  // one feature.
   Feature feature = 5;
 
   // Specifies which segment is being tracked if the request contains more than
-  // one segments.
+  // one segment.
   VideoSegment segment = 6;
 }
 
@@ -617,7 +718,7 @@ message SpeechTranscriptionConfig {
   // the top alternative of the recognition result using a speaker_tag provided
   // in the WordInfo.
   // Note: When this is true, we send all the words from the beginning of the
-  // audio for the top alternative in every consecutive responses.
+  // audio for the top alternative in every consecutive response.
   // This is done in order to improve our speaker tags as our models learn to
   // identify the speakers in the conversation over time.
   bool enable_speaker_diarization = 7 [(google.api.field_behavior) = OPTIONAL];
@@ -673,8 +774,8 @@ message SpeechRecognitionAlternative {
   float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Output only. A list of word-specific information for each recognized word.
-  // Note: When `enable_speaker_diarization` is true, you will see all the words
-  // from the beginning of the audio.
+  // Note: When `enable_speaker_diarization` is set to true, you will see all
+  // the words from the beginning of the audio.
   repeated WordInfo words = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
 }
 
@@ -792,18 +893,6 @@ message ObjectTrackingFrame {
 
 // Annotations corresponding to one tracked object.
 message ObjectTrackingAnnotation {
-  // Entity to specify the object category that this track is labeled as.
-  Entity entity = 1;
-
-  // Object category's labeling confidence of this track.
-  float confidence = 4;
-
-  // Information corresponding to all frames where this object track appears.
-  // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
-  // messages in frames.
-  // Streaming mode: it can only be one ObjectTrackingFrame message in frames.
-  repeated ObjectTrackingFrame frames = 2;
-
   // Different representation of tracking info in non-streaming batch
   // and streaming modes.
   oneof track_info {
@@ -819,6 +908,18 @@ message ObjectTrackingAnnotation {
     // ObjectTrackAnnotation of the same track_id over time.
     int64 track_id = 5;
   }
+
+  // Entity to specify the object category that this track is labeled as.
+  Entity entity = 1;
+
+  // Object category's labeling confidence of this track.
+  float confidence = 4;
+
+  // Information corresponding to all frames where this object track appears.
+  // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+  // messages in frames.
+  // Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+  repeated ObjectTrackingFrame frames = 2;
 }
 
 // Annotation corresponding to one detected, tracked and recognized logo class.
@@ -860,95 +961,9 @@ message StreamingAnnotateVideoRequest {
   }
 }
 
-// `StreamingAnnotateVideoResponse` is the only message returned to the client
-// by `StreamingAnnotateVideo`. A series of zero or more
-// `StreamingAnnotateVideoResponse` messages are streamed back to the client.
-message StreamingAnnotateVideoResponse {
-  // If set, returns a [google.rpc.Status][google.rpc.Status] message that
-  // specifies the error for the operation.
-  google.rpc.Status error = 1;
-
-  // Streaming annotation results.
-  StreamingVideoAnnotationResults annotation_results = 2;
-
-  // GCS URI that stores annotation results of one streaming session.
-  // It is a directory that can hold multiple files in JSON format.
-  // Example uri format:
-  // gs://bucket_id/object_id/cloud_project_name-session_id
-  string annotation_results_uri = 3;
-}
-
-// Config for STREAMING_AUTOML_CLASSIFICATION.
-message StreamingAutomlClassificationConfig {
-  // Resource name of AutoML model.
-  // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
-  string model_name = 1;
-}
-
-// Config for STREAMING_AUTOML_OBJECT_TRACKING.
-message StreamingAutomlObjectTrackingConfig {
-  // Resource name of AutoML model.
-  // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
-  string model_name = 1;
-}
-
-// Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
-message StreamingExplicitContentDetectionConfig {}
-
-// Config for STREAMING_LABEL_DETECTION.
-message StreamingLabelDetectionConfig {
-  // Whether the video has been captured from a stationary (i.e. non-moving)
-  // camera. When set to true, might improve detection accuracy for moving
-  // objects. Default: false.
-  bool stationary_camera = 1;
-}
-
-// Config for STREAMING_OBJECT_TRACKING.
-message StreamingObjectTrackingConfig {}
-
-// Config for STREAMING_SHOT_CHANGE_DETECTION.
-message StreamingShotChangeDetectionConfig {}
-
-// Config for streaming storage option.
-message StreamingStorageConfig {
-  // Enable streaming storage. Default: false.
-  bool enable_storage_annotation_result = 1;
-
-  // GCS URI to store all annotation results for one client. Client should
-  // specify this field as the top-level storage directory. Annotation results
-  // of different sessions will be put into different sub-directories denoted
-  // by project_name and session_id. All sub-directories will be auto generated
-  // by program and will be made accessible to client in response proto.
-  // URIs must be specified in the following format: `gs://bucket-id/object-id`
-  // `bucket-id` should be a valid GCS bucket created by client and bucket
-  // permission shall also be configured properly. `object-id` can be arbitrary
-  // string that make sense to client. Other URI formats will return error and
-  // cause GCS write failure.
-  string annotation_result_storage_directory = 3;
-}
-
-// Streaming annotation results corresponding to a portion of the video
-// that is currently being processed.
-message StreamingVideoAnnotationResults {
-  // Shot annotation results. Each shot is represented as a video segment.
-  repeated VideoSegment shot_annotations = 1;
-
-  // Label annotation results.
-  repeated LabelAnnotation label_annotations = 2;
-
-  // Explicit content annotation results.
-  ExplicitContentAnnotation explicit_annotation = 3;
-
-  // Object tracking results.
-  repeated ObjectTrackingAnnotation object_annotations = 4;
-}
-
 // Provides information to the annotator that specifies how to process the
 // request.
 message StreamingVideoConfig {
-  // Requested annotation feature.
-  StreamingFeature feature = 1;
-
   // Config for requested annotation feature.
   oneof streaming_config {
     // Config for STREAMING_SHOT_CHANGE_DETECTION.
@@ -964,6 +979,10 @@ message StreamingVideoConfig {
     // Config for STREAMING_OBJECT_TRACKING.
     StreamingObjectTrackingConfig object_tracking_config = 5;
 
+    // Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+    StreamingAutomlActionRecognitionConfig automl_action_recognition_config =
+        23;
+
     // Config for STREAMING_AUTOML_CLASSIFICATION.
     StreamingAutomlClassificationConfig automl_classification_config = 21;
 
@@ -971,102 +990,100 @@ message StreamingVideoConfig {
     StreamingAutomlObjectTrackingConfig automl_object_tracking_config = 22;
   }
 
+  // Requested annotation feature.
+  StreamingFeature feature = 1;
+
   // Streaming storage option. By default: storage is disabled.
   StreamingStorageConfig storage_config = 30;
 }
 
-// Video annotation feature.
-enum Feature {
-  // Unspecified.
-  FEATURE_UNSPECIFIED = 0;
-
-  // Label detection. Detect objects, such as dog or flower.
-  LABEL_DETECTION = 1;
-
-  // Shot change detection.
-  SHOT_CHANGE_DETECTION = 2;
-
-  // Explicit content detection.
-  EXPLICIT_CONTENT_DETECTION = 3;
-
-  // Human face detection.
-  FACE_DETECTION = 4;
-
-  // Speech transcription.
-  SPEECH_TRANSCRIPTION = 6;
-
-  // OCR text detection and tracking.
-  TEXT_DETECTION = 7;
-
-  // Object detection and tracking.
-  OBJECT_TRACKING = 9;
-
-  // Logo detection, tracking, and recognition.
-  LOGO_RECOGNITION = 12;
+// `StreamingAnnotateVideoResponse` is the only message returned to the client
+// by `StreamingAnnotateVideo`. A series of zero or more
+// `StreamingAnnotateVideoResponse` messages are streamed back to the client.
+message StreamingAnnotateVideoResponse {
+  // If set, returns a [google.rpc.Status][google.rpc.Status] message that
+  // specifies the error for the operation.
+  google.rpc.Status error = 1;
 
-  // Celebrity recognition.
-  CELEBRITY_RECOGNITION = 13;
+  // Streaming annotation results.
+  StreamingVideoAnnotationResults annotation_results = 2;
 
-  // Person detection.
-  PERSON_DETECTION = 14;
+  // Google Cloud Storage(GCS) URI that stores annotation results of one
+  // streaming session in JSON format.
+  // It is the annotation_result_storage_directory
+  // from the request followed by '/cloud_project_number-session_id'.
+  string annotation_results_uri = 3;
 }
 
-// Label detection mode.
-enum LabelDetectionMode {
-  // Unspecified.
-  LABEL_DETECTION_MODE_UNSPECIFIED = 0;
+// Streaming annotation results corresponding to a portion of the video
+// that is currently being processed.
+message StreamingVideoAnnotationResults {
+  // Shot annotation results. Each shot is represented as a video segment.
+  repeated VideoSegment shot_annotations = 1;
 
-  // Detect shot-level labels.
-  SHOT_MODE = 1;
+  // Label annotation results.
+  repeated LabelAnnotation label_annotations = 2;
 
-  // Detect frame-level labels.
-  FRAME_MODE = 2;
+  // Explicit content annotation results.
+  ExplicitContentAnnotation explicit_annotation = 3;
 
-  // Detect both shot-level and frame-level labels.
-  SHOT_AND_FRAME_MODE = 3;
+  // Object tracking results.
+  repeated ObjectTrackingAnnotation object_annotations = 4;
 }
 
-// Bucketized representation of likelihood.
-enum Likelihood {
-  // Unspecified likelihood.
-  LIKELIHOOD_UNSPECIFIED = 0;
-
-  // Very unlikely.
-  VERY_UNLIKELY = 1;
-
-  // Unlikely.
-  UNLIKELY = 2;
-
-  // Possible.
-  POSSIBLE = 3;
-
-  // Likely.
-  LIKELY = 4;
+// Config for STREAMING_SHOT_CHANGE_DETECTION.
+message StreamingShotChangeDetectionConfig {}
 
-  // Very likely.
-  VERY_LIKELY = 5;
+// Config for STREAMING_LABEL_DETECTION.
+message StreamingLabelDetectionConfig {
+  // Whether the video has been captured from a stationary (i.e. non-moving)
+  // camera. When set to true, it might improve detection accuracy for moving
+  // objects. Default: false.
+  bool stationary_camera = 1;
 }
 
-// Streaming video annotation feature.
-enum StreamingFeature {
-  // Unspecified.
-  STREAMING_FEATURE_UNSPECIFIED = 0;
+// Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
+message StreamingExplicitContentDetectionConfig {}
 
-  // Label detection. Detect objects, such as dog or flower.
-  STREAMING_LABEL_DETECTION = 1;
+// Config for STREAMING_OBJECT_TRACKING.
+message StreamingObjectTrackingConfig {}
 
-  // Shot change detection.
-  STREAMING_SHOT_CHANGE_DETECTION = 2;
+// Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+message StreamingAutomlActionRecognitionConfig {
+  // Resource name of AutoML model.
+  // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+  string model_name = 1;
+}
 
-  // Explicit content detection.
-  STREAMING_EXPLICIT_CONTENT_DETECTION = 3;
+// Config for STREAMING_AUTOML_CLASSIFICATION.
+message StreamingAutomlClassificationConfig {
+  // Resource name of AutoML model.
+  // Format:
+  // `projects/{project_number}/locations/{location_id}/models/{model_id}`
+  string model_name = 1;
+}
 
-  // Object detection and tracking.
-  STREAMING_OBJECT_TRACKING = 4;
+// Config for STREAMING_AUTOML_OBJECT_TRACKING.
+message StreamingAutomlObjectTrackingConfig {
+  // Resource name of AutoML model.
+  // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+  string model_name = 1;
+}
 
-  // Video classification based on AutoML model.
-  STREAMING_AUTOML_CLASSIFICATION = 21;
+// Config for streaming storage option.
+message StreamingStorageConfig {
+  // Enable streaming storage. Default: false.
+  bool enable_storage_annotation_result = 1;
 
-  // Object detection and tracking based on AutoML model.
-  STREAMING_AUTOML_OBJECT_TRACKING = 22;
+  // Cloud Storage URI to store all annotation results for one client. Client
+  // should specify this field as the top-level storage directory. Annotation
+  // results of different sessions will be put into different sub-directories
+  // denoted by project_name and session_id. All sub-directories will be auto
+  // generated by program and will be made accessible to client in response
+  // proto. URIs must be specified in the following format:
+  // `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
+  // bucket created by client and bucket permission shall also be configured
+  // properly. `object-id` can be an arbitrary string that makes sense to the client.
+  // Other URI formats will return error and cause Cloud Storage write failure.
+  string annotation_result_storage_directory = 3;
 }
diff --git a/synth.metadata b/synth.metadata
index 65351ce39..6118293f6 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -11,8 +11,8 @@
       "git": {
         "name": "googleapis",
         "remote": "https://github.com/googleapis/googleapis.git",
-        "sha": "aed11c01e52921613b9ee469c2d85f5f33175fb7",
-        "internalRef": "310660461"
+        "sha": "d1a9f02fd4fb263bae0383b4a5af0bbef33753d6",
+        "internalRef": "312101156"
       }
     },
     {