diff --git a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceClient.java b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceClient.java index 9915c9598..882bb6fe1 100644 --- a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceClient.java +++ b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceClient.java @@ -31,7 +31,7 @@ // AUTO-GENERATED DOCUMENTATION AND SERVICE /** - * Service Description: Service that implements Google Cloud Video Intelligence API. + * Service Description: Service that implements the Video Intelligence API. * *

This class provides the ability to make remote calls to the backing service through method * calls that map to API methods. Sample code to get started: @@ -185,15 +185,15 @@ public final OperationsClient getOperationsClient() { * } * * - * @param inputUri Input video location. Currently, only [Google Cloud - * Storage](https://cloud.google.com/storage/) URIs are supported, which must be specified in + * @param inputUri Input video location. Currently, only [Cloud + * Storage](https://cloud.google.com/storage/) URIs are supported. URIs must be specified in * the following format: `gs://bucket-id/object-id` (other URI formats return * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more - * information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A - * video URI may include wildcards in `object-id`, and thus identify multiple videos. + * information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + * To identify multiple videos, a video URI may include wildcards in the `object-id`. * Supported wildcards: '*' to match 0 or more characters; '?' to match 1 character. If * unset, the input video should be embedded in the request as `input_content`. If set, - * `input_content` should be unset. + * `input_content` must be unset. * @param features Required. Requested video annotation features. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ diff --git a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/package-info.java b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/package-info.java index 66eeae43d..b13cc56d7 100644 --- a/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/package-info.java +++ b/google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/package-info.java @@ -21,7 +21,7 @@ * *

============================== VideoIntelligenceServiceClient ============================== * - *

Service Description: Service that implements Google Cloud Video Intelligence API. + *

Service Description: Service that implements the Video Intelligence API. * *

Sample for VideoIntelligenceServiceClient: * diff --git a/grpc-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceGrpc.java b/grpc-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceGrpc.java index fe94d85af..67210f911 100644 --- a/grpc-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceGrpc.java +++ b/grpc-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceGrpc.java @@ -26,7 +26,7 @@ * * *

- * Service that implements Google Cloud Video Intelligence API.
+ * Service that implements the Video Intelligence API.
  * 
*/ @javax.annotation.Generated( @@ -132,7 +132,7 @@ public VideoIntelligenceServiceFutureStub newStub( * * *
-   * Service that implements Google Cloud Video Intelligence API.
+   * Service that implements the Video Intelligence API.
    * 
*/ public abstract static class VideoIntelligenceServiceImplBase implements io.grpc.BindableService { @@ -170,7 +170,7 @@ public final io.grpc.ServerServiceDefinition bindService() { * * *
-   * Service that implements Google Cloud Video Intelligence API.
+   * Service that implements the Video Intelligence API.
    * 
*/ public static final class VideoIntelligenceServiceStub @@ -209,7 +209,7 @@ public void annotateVideo( * * *
-   * Service that implements Google Cloud Video Intelligence API.
+   * Service that implements the Video Intelligence API.
    * 
*/ public static final class VideoIntelligenceServiceBlockingStub @@ -245,7 +245,7 @@ public com.google.longrunning.Operation annotateVideo( * * *
-   * Service that implements Google Cloud Video Intelligence API.
+   * Service that implements the Video Intelligence API.
    * 
*/ public static final class VideoIntelligenceServiceFutureStub diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/AnnotateVideoRequest.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/AnnotateVideoRequest.java index de50e5f6f..92f0c9a52 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/AnnotateVideoRequest.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/AnnotateVideoRequest.java @@ -187,15 +187,16 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * Input video location. Currently, only
-   * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-   * supported, which must be specified in the following format:
+   * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+   * supported. URIs must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
-   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-   * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
-   * A video URI may include wildcards in `object-id`, and thus identify
-   * multiple videos. Supported wildcards: '*' to match 0 or more characters;
+   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+   * multiple videos, a video URI may include wildcards in the `object-id`.
+   * Supported wildcards: '*' to match 0 or more characters;
    * '?' to match 1 character. If unset, the input video should be embedded
-   * in the request as `input_content`. If set, `input_content` should be unset.
+   * in the request as `input_content`. If set, `input_content` must be unset.
    * 
* * string input_uri = 1; @@ -219,15 +220,16 @@ public java.lang.String getInputUri() { * *
    * Input video location. Currently, only
-   * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-   * supported, which must be specified in the following format:
+   * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+   * supported. URIs must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
-   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-   * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
-   * A video URI may include wildcards in `object-id`, and thus identify
-   * multiple videos. Supported wildcards: '*' to match 0 or more characters;
+   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+   * multiple videos, a video URI may include wildcards in the `object-id`.
+   * Supported wildcards: '*' to match 0 or more characters;
    * '?' to match 1 character. If unset, the input video should be embedded
-   * in the request as `input_content`. If set, `input_content` should be unset.
+   * in the request as `input_content`. If set, `input_content` must be unset.
    * 
* * string input_uri = 1; @@ -254,8 +256,8 @@ public com.google.protobuf.ByteString getInputUriBytes() { * *
    * The video data bytes.
-   * If unset, the input video(s) should be specified via `input_uri`.
-   * If set, `input_uri` should be unset.
+   * If unset, the input video(s) should be specified via the `input_uri`.
+   * If set, `input_uri` must be unset.
    * 
* * bytes input_content = 6; @@ -430,11 +432,12 @@ public com.google.cloud.videointelligence.v1.VideoContextOrBuilder getVideoConte * *
    * Optional. Location where the output (in JSON format) should be stored.
-   * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-   * URIs are supported, which must be specified in the following format:
+   * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+   * URIs are supported. These must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
-   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-   * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints).
    * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -458,11 +461,12 @@ public java.lang.String getOutputUri() { * *
    * Optional. Location where the output (in JSON format) should be stored.
-   * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-   * URIs are supported, which must be specified in the following format:
+   * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+   * URIs are supported. These must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
-   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-   * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints).
    * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -489,8 +493,9 @@ public com.google.protobuf.ByteString getOutputUriBytes() { * *
    * Optional. Cloud region where annotation should take place. Supported cloud
-   * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-   * is specified, a region will be determined based on video file location.
+   * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+   * region is specified, the region will be determined based on video file
+   * location.
    * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -514,8 +519,9 @@ public java.lang.String getLocationId() { * *
    * Optional. Cloud region where annotation should take place. Supported cloud
-   * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-   * is specified, a region will be determined based on video file location.
+   * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+   * region is specified, the region will be determined based on video file
+   * location.
    * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -980,15 +986,16 @@ public Builder mergeFrom( * *
      * Input video location. Currently, only
-     * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-     * supported, which must be specified in the following format:
+     * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+     * supported. URIs must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
-     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-     * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
-     * A video URI may include wildcards in `object-id`, and thus identify
-     * multiple videos. Supported wildcards: '*' to match 0 or more characters;
+     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+     * multiple videos, a video URI may include wildcards in the `object-id`.
+     * Supported wildcards: '*' to match 0 or more characters;
      * '?' to match 1 character. If unset, the input video should be embedded
-     * in the request as `input_content`. If set, `input_content` should be unset.
+     * in the request as `input_content`. If set, `input_content` must be unset.
      * 
* * string input_uri = 1; @@ -1011,15 +1018,16 @@ public java.lang.String getInputUri() { * *
      * Input video location. Currently, only
-     * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-     * supported, which must be specified in the following format:
+     * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+     * supported. URIs must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
-     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-     * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
-     * A video URI may include wildcards in `object-id`, and thus identify
-     * multiple videos. Supported wildcards: '*' to match 0 or more characters;
+     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+     * multiple videos, a video URI may include wildcards in the `object-id`.
+     * Supported wildcards: '*' to match 0 or more characters;
      * '?' to match 1 character. If unset, the input video should be embedded
-     * in the request as `input_content`. If set, `input_content` should be unset.
+     * in the request as `input_content`. If set, `input_content` must be unset.
      * 
* * string input_uri = 1; @@ -1042,15 +1050,16 @@ public com.google.protobuf.ByteString getInputUriBytes() { * *
      * Input video location. Currently, only
-     * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-     * supported, which must be specified in the following format:
+     * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+     * supported. URIs must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
-     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-     * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
-     * A video URI may include wildcards in `object-id`, and thus identify
-     * multiple videos. Supported wildcards: '*' to match 0 or more characters;
+     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+     * multiple videos, a video URI may include wildcards in the `object-id`.
+     * Supported wildcards: '*' to match 0 or more characters;
      * '?' to match 1 character. If unset, the input video should be embedded
-     * in the request as `input_content`. If set, `input_content` should be unset.
+     * in the request as `input_content`. If set, `input_content` must be unset.
      * 
* * string input_uri = 1; @@ -1072,15 +1081,16 @@ public Builder setInputUri(java.lang.String value) { * *
      * Input video location. Currently, only
-     * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-     * supported, which must be specified in the following format:
+     * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+     * supported. URIs must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
-     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-     * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
-     * A video URI may include wildcards in `object-id`, and thus identify
-     * multiple videos. Supported wildcards: '*' to match 0 or more characters;
+     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+     * multiple videos, a video URI may include wildcards in the `object-id`.
+     * Supported wildcards: '*' to match 0 or more characters;
      * '?' to match 1 character. If unset, the input video should be embedded
-     * in the request as `input_content`. If set, `input_content` should be unset.
+     * in the request as `input_content`. If set, `input_content` must be unset.
      * 
* * string input_uri = 1; @@ -1098,15 +1108,16 @@ public Builder clearInputUri() { * *
      * Input video location. Currently, only
-     * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-     * supported, which must be specified in the following format:
+     * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+     * supported. URIs must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
-     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-     * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
-     * A video URI may include wildcards in `object-id`, and thus identify
-     * multiple videos. Supported wildcards: '*' to match 0 or more characters;
+     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+     * multiple videos, a video URI may include wildcards in the `object-id`.
+     * Supported wildcards: '*' to match 0 or more characters;
      * '?' to match 1 character. If unset, the input video should be embedded
-     * in the request as `input_content`. If set, `input_content` should be unset.
+     * in the request as `input_content`. If set, `input_content` must be unset.
      * 
* * string input_uri = 1; @@ -1131,8 +1142,8 @@ public Builder setInputUriBytes(com.google.protobuf.ByteString value) { * *
      * The video data bytes.
-     * If unset, the input video(s) should be specified via `input_uri`.
-     * If set, `input_uri` should be unset.
+     * If unset, the input video(s) should be specified via the `input_uri`.
+     * If set, `input_uri` must be unset.
      * 
* * bytes input_content = 6; @@ -1148,8 +1159,8 @@ public com.google.protobuf.ByteString getInputContent() { * *
      * The video data bytes.
-     * If unset, the input video(s) should be specified via `input_uri`.
-     * If set, `input_uri` should be unset.
+     * If unset, the input video(s) should be specified via the `input_uri`.
+     * If set, `input_uri` must be unset.
      * 
* * bytes input_content = 6; @@ -1171,8 +1182,8 @@ public Builder setInputContent(com.google.protobuf.ByteString value) { * *
      * The video data bytes.
-     * If unset, the input video(s) should be specified via `input_uri`.
-     * If set, `input_uri` should be unset.
+     * If unset, the input video(s) should be specified via the `input_uri`.
+     * If set, `input_uri` must be unset.
      * 
* * bytes input_content = 6; @@ -1623,11 +1634,12 @@ public com.google.cloud.videointelligence.v1.VideoContextOrBuilder getVideoConte * *
      * Optional. Location where the output (in JSON format) should be stored.
-     * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-     * URIs are supported, which must be specified in the following format:
+     * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+     * URIs are supported. These must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
-     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-     * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints).
      * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -1650,11 +1662,12 @@ public java.lang.String getOutputUri() { * *
      * Optional. Location where the output (in JSON format) should be stored.
-     * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-     * URIs are supported, which must be specified in the following format:
+     * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+     * URIs are supported. These must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
-     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-     * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints).
      * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -1677,11 +1690,12 @@ public com.google.protobuf.ByteString getOutputUriBytes() { * *
      * Optional. Location where the output (in JSON format) should be stored.
-     * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-     * URIs are supported, which must be specified in the following format:
+     * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+     * URIs are supported. These must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
-     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-     * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints).
      * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -1703,11 +1717,12 @@ public Builder setOutputUri(java.lang.String value) { * *
      * Optional. Location where the output (in JSON format) should be stored.
-     * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-     * URIs are supported, which must be specified in the following format:
+     * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+     * URIs are supported. These must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
-     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-     * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints).
      * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -1725,11 +1740,12 @@ public Builder clearOutputUri() { * *
      * Optional. Location where the output (in JSON format) should be stored.
-     * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-     * URIs are supported, which must be specified in the following format:
+     * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+     * URIs are supported. These must be specified in the following format:
      * `gs://bucket-id/object-id` (other URI formats return
-     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-     * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+     * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+     * more information, see [Request
+     * URIs](https://cloud.google.com/storage/docs/request-endpoints).
      * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -1754,8 +1770,9 @@ public Builder setOutputUriBytes(com.google.protobuf.ByteString value) { * *
      * Optional. Cloud region where annotation should take place. Supported cloud
-     * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-     * is specified, a region will be determined based on video file location.
+     * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+     * region is specified, the region will be determined based on video file
+     * location.
      * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1778,8 +1795,9 @@ public java.lang.String getLocationId() { * *
      * Optional. Cloud region where annotation should take place. Supported cloud
-     * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-     * is specified, a region will be determined based on video file location.
+     * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+     * region is specified, the region will be determined based on video file
+     * location.
      * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1802,8 +1820,9 @@ public com.google.protobuf.ByteString getLocationIdBytes() { * *
      * Optional. Cloud region where annotation should take place. Supported cloud
-     * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-     * is specified, a region will be determined based on video file location.
+     * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+     * region is specified, the region will be determined based on video file
+     * location.
      * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1825,8 +1844,9 @@ public Builder setLocationId(java.lang.String value) { * *
      * Optional. Cloud region where annotation should take place. Supported cloud
-     * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-     * is specified, a region will be determined based on video file location.
+     * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+     * region is specified, the region will be determined based on video file
+     * location.
      * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1844,8 +1864,9 @@ public Builder clearLocationId() { * *
      * Optional. Cloud region where annotation should take place. Supported cloud
-     * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-     * is specified, a region will be determined based on video file location.
+     * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+     * region is specified, the region will be determined based on video file
+     * location.
      * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/AnnotateVideoRequestOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/AnnotateVideoRequestOrBuilder.java index 44385e59f..3868569cc 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/AnnotateVideoRequestOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/AnnotateVideoRequestOrBuilder.java @@ -28,15 +28,16 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Input video location. Currently, only
-   * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-   * supported, which must be specified in the following format:
+   * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+   * supported. URIs must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
-   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-   * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
-   * A video URI may include wildcards in `object-id`, and thus identify
-   * multiple videos. Supported wildcards: '*' to match 0 or more characters;
+   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+   * multiple videos, a video URI may include wildcards in the `object-id`.
+   * Supported wildcards: '*' to match 0 or more characters;
    * '?' to match 1 character. If unset, the input video should be embedded
-   * in the request as `input_content`. If set, `input_content` should be unset.
+   * in the request as `input_content`. If set, `input_content` must be unset.
    * 
* * string input_uri = 1; @@ -49,15 +50,16 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Input video location. Currently, only
-   * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
-   * supported, which must be specified in the following format:
+   * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+   * supported. URIs must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
-   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-   * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
-   * A video URI may include wildcards in `object-id`, and thus identify
-   * multiple videos. Supported wildcards: '*' to match 0 or more characters;
+   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+   * multiple videos, a video URI may include wildcards in the `object-id`.
+   * Supported wildcards: '*' to match 0 or more characters;
    * '?' to match 1 character. If unset, the input video should be embedded
-   * in the request as `input_content`. If set, `input_content` should be unset.
+   * in the request as `input_content`. If set, `input_content` must be unset.
    * 
* * string input_uri = 1; @@ -71,8 +73,8 @@ public interface AnnotateVideoRequestOrBuilder * *
    * The video data bytes.
-   * If unset, the input video(s) should be specified via `input_uri`.
-   * If set, `input_uri` should be unset.
+   * If unset, the input video(s) should be specified via the `input_uri`.
+   * If set, `input_uri` must be unset.
    * 
* * bytes input_content = 6; @@ -194,11 +196,12 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Optional. Location where the output (in JSON format) should be stored.
-   * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-   * URIs are supported, which must be specified in the following format:
+   * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+   * URIs are supported. These must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
-   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-   * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints).
    * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -211,11 +214,12 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Optional. Location where the output (in JSON format) should be stored.
-   * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
-   * URIs are supported, which must be specified in the following format:
+   * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+   * URIs are supported. These must be specified in the following format:
    * `gs://bucket-id/object-id` (other URI formats return
-   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
-   * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+   * more information, see [Request
+   * URIs](https://cloud.google.com/storage/docs/request-endpoints).
    * 
* * string output_uri = 4 [(.google.api.field_behavior) = OPTIONAL]; @@ -229,8 +233,9 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Optional. Cloud region where annotation should take place. Supported cloud
-   * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-   * is specified, a region will be determined based on video file location.
+   * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+   * region is specified, the region will be determined based on video file
+   * location.
    * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -243,8 +248,9 @@ public interface AnnotateVideoRequestOrBuilder * *
    * Optional. Cloud region where annotation should take place. Supported cloud
-   * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
-   * is specified, a region will be determined based on video file location.
+   * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+   * region is specified, the region will be determined based on video file
+   * location.
    * 
* * string location_id = 5 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedAttribute.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedAttribute.java index f3c0c54f1..0b2c903d2 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedAttribute.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedAttribute.java @@ -130,7 +130,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+   * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
    * A full list of supported type names will be provided in the document.
    * 
* @@ -154,7 +154,7 @@ public java.lang.String getName() { * * *
-   * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+   * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
    * A full list of supported type names will be provided in the document.
    * 
* @@ -600,7 +600,7 @@ public Builder mergeFrom( * * *
-     * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+     * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
      * A full list of supported type names will be provided in the document.
      * 
* @@ -623,7 +623,7 @@ public java.lang.String getName() { * * *
-     * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+     * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
      * A full list of supported type names will be provided in the document.
      * 
* @@ -646,7 +646,7 @@ public com.google.protobuf.ByteString getNameBytes() { * * *
-     * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+     * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
      * A full list of supported type names will be provided in the document.
      * 
* @@ -668,7 +668,7 @@ public Builder setName(java.lang.String value) { * * *
-     * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+     * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
      * A full list of supported type names will be provided in the document.
      * 
* @@ -686,7 +686,7 @@ public Builder clearName() { * * *
-     * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+     * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
      * A full list of supported type names will be provided in the document.
      * 
* diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedAttributeOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedAttributeOrBuilder.java index c7185a720..96ead23cc 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedAttributeOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedAttributeOrBuilder.java @@ -27,7 +27,7 @@ public interface DetectedAttributeOrBuilder * * *
-   * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+   * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
    * A full list of supported type names will be provided in the document.
    * 
* @@ -40,7 +40,7 @@ public interface DetectedAttributeOrBuilder * * *
-   * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
+   * The name of the attribute, for example, glasses, dark_glasses, mouth_open.
    * A full list of supported type names will be provided in the document.
    * 
* diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedLandmark.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedLandmark.java index 719a5151c..b486061fc 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedLandmark.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedLandmark.java @@ -140,7 +140,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * The name of this landmark, i.e. left_hand, right_shoulder.
+   * The name of this landmark, for example, left_hand, right_shoulder.
    * 
* * string name = 1; @@ -163,7 +163,7 @@ public java.lang.String getName() { * * *
-   * The name of this landmark, i.e. left_hand, right_shoulder.
+   * The name of this landmark, for example, left_hand, right_shoulder.
    * 
* * string name = 1; @@ -621,7 +621,7 @@ public Builder mergeFrom( * * *
-     * The name of this landmark, i.e. left_hand, right_shoulder.
+     * The name of this landmark, for example, left_hand, right_shoulder.
      * 
* * string name = 1; @@ -643,7 +643,7 @@ public java.lang.String getName() { * * *
-     * The name of this landmark, i.e. left_hand, right_shoulder.
+     * The name of this landmark, for example, left_hand, right_shoulder.
      * 
* * string name = 1; @@ -665,7 +665,7 @@ public com.google.protobuf.ByteString getNameBytes() { * * *
-     * The name of this landmark, i.e. left_hand, right_shoulder.
+     * The name of this landmark, for example, left_hand, right_shoulder.
      * 
* * string name = 1; @@ -686,7 +686,7 @@ public Builder setName(java.lang.String value) { * * *
-     * The name of this landmark, i.e. left_hand, right_shoulder.
+     * The name of this landmark, for example, left_hand, right_shoulder.
      * 
* * string name = 1; @@ -703,7 +703,7 @@ public Builder clearName() { * * *
-     * The name of this landmark, i.e. left_hand, right_shoulder.
+     * The name of this landmark, for example, left_hand, right_shoulder.
      * 
* * string name = 1; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedLandmarkOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedLandmarkOrBuilder.java index ff81d57af..8c16528c7 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedLandmarkOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/DetectedLandmarkOrBuilder.java @@ -27,7 +27,7 @@ public interface DetectedLandmarkOrBuilder * * *
-   * The name of this landmark, i.e. left_hand, right_shoulder.
+   * The name of this landmark, for example, left_hand, right_shoulder.
    * 
* * string name = 1; @@ -39,7 +39,7 @@ public interface DetectedLandmarkOrBuilder * * *
-   * The name of this landmark, i.e. left_hand, right_shoulder.
+   * The name of this landmark, for example, left_hand, right_shoulder.
    * 
* * string name = 1; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Entity.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Entity.java index dfcc9c156..111281478 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Entity.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Entity.java @@ -186,7 +186,7 @@ public com.google.protobuf.ByteString getEntityIdBytes() { * * *
-   * Textual description, e.g. `Fixed-gear bicycle`.
+   * Textual description, e.g., `Fixed-gear bicycle`.
    * 
* * string description = 2; @@ -209,7 +209,7 @@ public java.lang.String getDescription() { * * *
-   * Textual description, e.g. `Fixed-gear bicycle`.
+   * Textual description, e.g., `Fixed-gear bicycle`.
    * 
* * string description = 2; @@ -747,7 +747,7 @@ public Builder setEntityIdBytes(com.google.protobuf.ByteString value) { * * *
-     * Textual description, e.g. `Fixed-gear bicycle`.
+     * Textual description, e.g., `Fixed-gear bicycle`.
      * 
* * string description = 2; @@ -769,7 +769,7 @@ public java.lang.String getDescription() { * * *
-     * Textual description, e.g. `Fixed-gear bicycle`.
+     * Textual description, e.g., `Fixed-gear bicycle`.
      * 
* * string description = 2; @@ -791,7 +791,7 @@ public com.google.protobuf.ByteString getDescriptionBytes() { * * *
-     * Textual description, e.g. `Fixed-gear bicycle`.
+     * Textual description, e.g., `Fixed-gear bicycle`.
      * 
* * string description = 2; @@ -812,7 +812,7 @@ public Builder setDescription(java.lang.String value) { * * *
-     * Textual description, e.g. `Fixed-gear bicycle`.
+     * Textual description, e.g., `Fixed-gear bicycle`.
      * 
* * string description = 2; @@ -829,7 +829,7 @@ public Builder clearDescription() { * * *
-     * Textual description, e.g. `Fixed-gear bicycle`.
+     * Textual description, e.g., `Fixed-gear bicycle`.
      * 
* * string description = 2; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/EntityOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/EntityOrBuilder.java index c4d2e09b0..5675e881a 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/EntityOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/EntityOrBuilder.java @@ -56,7 +56,7 @@ public interface EntityOrBuilder * * *
-   * Textual description, e.g. `Fixed-gear bicycle`.
+   * Textual description, e.g., `Fixed-gear bicycle`.
    * 
* * string description = 2; @@ -68,7 +68,7 @@ public interface EntityOrBuilder * * *
-   * Textual description, e.g. `Fixed-gear bicycle`.
+   * Textual description, e.g., `Fixed-gear bicycle`.
    * 
* * string description = 2; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ExplicitContentAnnotation.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ExplicitContentAnnotation.java index 6fea2b3b2..c7e39bdb8 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ExplicitContentAnnotation.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ExplicitContentAnnotation.java @@ -41,6 +41,7 @@ private ExplicitContentAnnotation(com.google.protobuf.GeneratedMessageV3.Builder private ExplicitContentAnnotation() { frames_ = java.util.Collections.emptyList(); + version_ = ""; } @java.lang.Override @@ -87,6 +88,13 @@ private ExplicitContentAnnotation( extensionRegistry)); break; } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + version_ = s; + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -196,6 +204,55 @@ public com.google.cloud.videointelligence.v1.ExplicitContentFrameOrBuilder getFr return frames_.get(index); } + public static final int VERSION_FIELD_NUMBER = 2; + private volatile java.lang.Object version_; + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 2; + * + * @return The version. + */ + @java.lang.Override + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } + } + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 2; + * + * @return The bytes for version. + */ + @java.lang.Override + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -213,6 +270,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io for (int i = 0; i < frames_.size(); i++) { output.writeMessage(1, frames_.get(i)); } + if (!getVersionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, version_); + } unknownFields.writeTo(output); } @@ -225,6 +285,9 @@ public int getSerializedSize() { for (int i = 0; i < frames_.size(); i++) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, frames_.get(i)); } + if (!getVersionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, version_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -242,6 +305,7 @@ public boolean equals(final java.lang.Object obj) { (com.google.cloud.videointelligence.v1.ExplicitContentAnnotation) obj; if (!getFramesList().equals(other.getFramesList())) return false; + if (!getVersion().equals(other.getVersion())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -257,6 +321,8 @@ public int hashCode() { hash = (37 * hash) + FRAMES_FIELD_NUMBER; hash = (53 * hash) + getFramesList().hashCode(); } + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -413,6 +479,8 @@ public Builder clear() { } else { framesBuilder_.clear(); } + version_ = ""; + return this; } @@ 
-451,6 +519,7 @@ public com.google.cloud.videointelligence.v1.ExplicitContentAnnotation buildPart } else { result.frames_ = framesBuilder_.build(); } + result.version_ = version_; onBuilt(); return result; } @@ -530,6 +599,10 @@ public Builder mergeFrom( } } } + if (!other.getVersion().isEmpty()) { + version_ = other.version_; + onChanged(); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -928,6 +1001,112 @@ public com.google.cloud.videointelligence.v1.ExplicitContentFrame.Builder addFra return framesBuilder_; } + private java.lang.Object version_ = ""; + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 2; + * + * @return The version. + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 2; + * + * @return The bytes for version. + */ + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 2; + * + * @param value The version to set. + * @return This builder for chaining. + */ + public Builder setVersion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + version_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 2; + * + * @return This builder for chaining. + */ + public Builder clearVersion() { + + version_ = getDefaultInstance().getVersion(); + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 2; + * + * @param value The bytes for version to set. + * @return This builder for chaining. + */ + public Builder setVersionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + version_ = value; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ExplicitContentAnnotationOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ExplicitContentAnnotationOrBuilder.java index 996b43f52..69c735df7 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ExplicitContentAnnotationOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ExplicitContentAnnotationOrBuilder.java @@ -74,4 +74,29 @@ public interface ExplicitContentAnnotationOrBuilder * repeated .google.cloud.videointelligence.v1.ExplicitContentFrame frames = 1; */ com.google.cloud.videointelligence.v1.ExplicitContentFrameOrBuilder getFramesOrBuilder(int index); + + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 2; + * + * @return The version. + */ + java.lang.String getVersion(); + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 2; + * + * @return The bytes for version. + */ + com.google.protobuf.ByteString getVersionBytes(); } diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceAnnotation.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceAnnotation.java index bd0ce1e02..fca145f70 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceAnnotation.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceAnnotation.java @@ -22,11 +22,12 @@ * * *
- * Face annotation.
+ * Deprecated. No effect.
  * 
* * Protobuf type {@code google.cloud.videointelligence.v1.FaceAnnotation} */ +@java.lang.Deprecated public final class FaceAnnotation extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.FaceAnnotation) @@ -485,7 +486,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * Face annotation.
+   * Deprecated. No effect.
    * 
* * Protobuf type {@code google.cloud.videointelligence.v1.FaceAnnotation} diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceAnnotationOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceAnnotationOrBuilder.java index 3f3ea7157..db7ed407f 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceAnnotationOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceAnnotationOrBuilder.java @@ -18,6 +18,7 @@ package com.google.cloud.videointelligence.v1; +@java.lang.Deprecated public interface FaceAnnotationOrBuilder extends // @@protoc_insertion_point(interface_extends:google.cloud.videointelligence.v1.FaceAnnotation) diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionAnnotation.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionAnnotation.java new file mode 100644 index 000000000..38c9d79dc --- /dev/null +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionAnnotation.java @@ -0,0 +1,641 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/videointelligence/v1/video_intelligence.proto + +package com.google.cloud.videointelligence.v1; + +/** + * + * + *
+ * Face detection annotation.
+ * 
+ * + * Protobuf type {@code google.cloud.videointelligence.v1.FaceDetectionAnnotation} + */ +public final class FaceDetectionAnnotation extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.FaceDetectionAnnotation) + FaceDetectionAnnotationOrBuilder { + private static final long serialVersionUID = 0L; + // Use FaceDetectionAnnotation.newBuilder() to construct. + private FaceDetectionAnnotation(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FaceDetectionAnnotation() { + version_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FaceDetectionAnnotation(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private FaceDetectionAnnotation( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + + version_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = 
unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_FaceDetectionAnnotation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_FaceDetectionAnnotation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.class, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder.class); + } + + public static final int VERSION_FIELD_NUMBER = 5; + private volatile java.lang.Object version_; + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 5; + * + * @return The version. + */ + @java.lang.Override + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } + } + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 5; + * + * @return The bytes for version. + */ + @java.lang.Override + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getVersionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, version_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getVersionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, version_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.videointelligence.v1.FaceDetectionAnnotation)) { + return super.equals(obj); + } + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation other = + (com.google.cloud.videointelligence.v1.FaceDetectionAnnotation) obj; + + if (!getVersion().equals(other.getVersion())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = 
(19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, 
input); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Face detection annotation.
+   * 
+ * + * Protobuf type {@code google.cloud.videointelligence.v1.FaceDetectionAnnotation} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.videointelligence.v1.FaceDetectionAnnotation) + com.google.cloud.videointelligence.v1.FaceDetectionAnnotationOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_FaceDetectionAnnotation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_FaceDetectionAnnotation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.class, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder.class); + } + + // Construct using com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + version_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_FaceDetectionAnnotation_descriptor; + } + + @java.lang.Override + public 
com.google.cloud.videointelligence.v1.FaceDetectionAnnotation + getDefaultInstanceForType() { + return com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1.FaceDetectionAnnotation build() { + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1.FaceDetectionAnnotation buildPartial() { + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation result = + new com.google.cloud.videointelligence.v1.FaceDetectionAnnotation(this); + result.version_ = version_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.videointelligence.v1.FaceDetectionAnnotation) { + return 
mergeFrom((com.google.cloud.videointelligence.v1.FaceDetectionAnnotation) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.videointelligence.v1.FaceDetectionAnnotation other) { + if (other + == com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.getDefaultInstance()) + return this; + if (!other.getVersion().isEmpty()) { + version_ = other.version_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.videointelligence.v1.FaceDetectionAnnotation) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object version_ = ""; + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 5; + * + * @return The version. + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 5; + * + * @return The bytes for version. + */ + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 5; + * + * @param value The version to set. + * @return This builder for chaining. + */ + public Builder setVersion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + version_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 5; + * + * @return This builder for chaining. + */ + public Builder clearVersion() { + + version_ = getDefaultInstance().getVersion(); + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 5; + * + * @param value The bytes for version to set. + * @return This builder for chaining. + */ + public Builder setVersionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + version_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.videointelligence.v1.FaceDetectionAnnotation) + } + + // @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceDetectionAnnotation) + private static final com.google.cloud.videointelligence.v1.FaceDetectionAnnotation + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.videointelligence.v1.FaceDetectionAnnotation(); + } + + public static com.google.cloud.videointelligence.v1.FaceDetectionAnnotation getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FaceDetectionAnnotation parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FaceDetectionAnnotation(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1.FaceDetectionAnnotation getDefaultInstanceForType() { + return 
DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionAnnotationOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionAnnotationOrBuilder.java new file mode 100644 index 000000000..85d141435 --- /dev/null +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionAnnotationOrBuilder.java @@ -0,0 +1,50 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/videointelligence/v1/video_intelligence.proto + +package com.google.cloud.videointelligence.v1; + +public interface FaceDetectionAnnotationOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.videointelligence.v1.FaceDetectionAnnotation) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 5; + * + * @return The version. + */ + java.lang.String getVersion(); + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 5; + * + * @return The bytes for version. + */ + com.google.protobuf.ByteString getVersionBytes(); +} diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionConfig.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionConfig.java index ddcf8d8a0..a64155c33 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionConfig.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionConfig.java @@ -82,6 +82,11 @@ private FaceDetectionConfig( includeBoundingBoxes_ = input.readBool(); break; } + case 40: + { + includeAttributes_ = input.readBool(); + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -175,7 +180,7 @@ public com.google.protobuf.ByteString getModelBytes() { * * *
-   * Whether bounding boxes be included in the face annotation output.
+   * Whether bounding boxes are included in the face annotation output.
    * 
* * bool include_bounding_boxes = 2; @@ -187,6 +192,25 @@ public boolean getIncludeBoundingBoxes() { return includeBoundingBoxes_; } + public static final int INCLUDE_ATTRIBUTES_FIELD_NUMBER = 5; + private boolean includeAttributes_; + /** + * + * + *
+   * Whether to enable face attributes detection, such as glasses, dark_glasses,
+   * mouth_open, etc. Ignored if 'include_bounding_boxes' is set to false.
+   * 
+ * + * bool include_attributes = 5; + * + * @return The includeAttributes. + */ + @java.lang.Override + public boolean getIncludeAttributes() { + return includeAttributes_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -207,6 +231,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (includeBoundingBoxes_ != false) { output.writeBool(2, includeBoundingBoxes_); } + if (includeAttributes_ != false) { + output.writeBool(5, includeAttributes_); + } unknownFields.writeTo(output); } @@ -222,6 +249,9 @@ public int getSerializedSize() { if (includeBoundingBoxes_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, includeBoundingBoxes_); } + if (includeAttributes_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, includeAttributes_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -240,6 +270,7 @@ public boolean equals(final java.lang.Object obj) { if (!getModel().equals(other.getModel())) return false; if (getIncludeBoundingBoxes() != other.getIncludeBoundingBoxes()) return false; + if (getIncludeAttributes() != other.getIncludeAttributes()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -255,6 +286,8 @@ public int hashCode() { hash = (53 * hash) + getModel().hashCode(); hash = (37 * hash) + INCLUDE_BOUNDING_BOXES_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIncludeBoundingBoxes()); + hash = (37 * hash) + INCLUDE_ATTRIBUTES_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIncludeAttributes()); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -405,6 +438,8 @@ public Builder clear() { includeBoundingBoxes_ = false; + includeAttributes_ = false; + return this; } @@ -434,6 +469,7 @@ public com.google.cloud.videointelligence.v1.FaceDetectionConfig buildPartial() new 
com.google.cloud.videointelligence.v1.FaceDetectionConfig(this); result.model_ = model_; result.includeBoundingBoxes_ = includeBoundingBoxes_; + result.includeAttributes_ = includeAttributes_; onBuilt(); return result; } @@ -491,6 +527,9 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.FaceDetectionConf if (other.getIncludeBoundingBoxes() != false) { setIncludeBoundingBoxes(other.getIncludeBoundingBoxes()); } + if (other.getIncludeAttributes() != false) { + setIncludeAttributes(other.getIncludeAttributes()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -642,7 +681,7 @@ public Builder setModelBytes(com.google.protobuf.ByteString value) { * * *
-     * Whether bounding boxes be included in the face annotation output.
+     * Whether bounding boxes are included in the face annotation output.
      * 
* * bool include_bounding_boxes = 2; @@ -657,7 +696,7 @@ public boolean getIncludeBoundingBoxes() { * * *
-     * Whether bounding boxes be included in the face annotation output.
+     * Whether bounding boxes are included in the face annotation output.
      * 
* * bool include_bounding_boxes = 2; @@ -675,7 +714,7 @@ public Builder setIncludeBoundingBoxes(boolean value) { * * *
-     * Whether bounding boxes be included in the face annotation output.
+     * Whether bounding boxes are included in the face annotation output.
      * 
* * bool include_bounding_boxes = 2; @@ -689,6 +728,61 @@ public Builder clearIncludeBoundingBoxes() { return this; } + private boolean includeAttributes_; + /** + * + * + *
+     * Whether to enable face attributes detection, such as glasses, dark_glasses,
+     * mouth_open, etc. Ignored if 'include_bounding_boxes' is set to false.
+     * 
+ * + * bool include_attributes = 5; + * + * @return The includeAttributes. + */ + @java.lang.Override + public boolean getIncludeAttributes() { + return includeAttributes_; + } + /** + * + * + *
+     * Whether to enable face attributes detection, such as glasses, dark_glasses,
+     * mouth_open, etc. Ignored if 'include_bounding_boxes' is set to false.
+     * 
+ * + * bool include_attributes = 5; + * + * @param value The includeAttributes to set. + * @return This builder for chaining. + */ + public Builder setIncludeAttributes(boolean value) { + + includeAttributes_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Whether to enable face attributes detection, such as glasses, dark_glasses,
+     * mouth_open, etc. Ignored if 'include_bounding_boxes' is set to false.
+     * 
+ * + * bool include_attributes = 5; + * + * @return This builder for chaining. + */ + public Builder clearIncludeAttributes() { + + includeAttributes_ = false; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionConfigOrBuilder.java index e1559f2ef..946bb8b9b 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionConfigOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionConfigOrBuilder.java @@ -56,7 +56,7 @@ public interface FaceDetectionConfigOrBuilder * * *
-   * Whether bounding boxes be included in the face annotation output.
+   * Whether bounding boxes are included in the face annotation output.
    * 
* * bool include_bounding_boxes = 2; @@ -64,4 +64,18 @@ public interface FaceDetectionConfigOrBuilder * @return The includeBoundingBoxes. */ boolean getIncludeBoundingBoxes(); + + /** + * + * + *
+   * Whether to enable face attributes detection, such as glasses, dark_glasses,
+   * mouth_open, etc. Ignored if 'include_bounding_boxes' is set to false.
+   * 
+ * + * bool include_attributes = 5; + * + * @return The includeAttributes. + */ + boolean getIncludeAttributes(); } diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceFrame.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceFrame.java index b8e97483a..cd65c75a6 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceFrame.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceFrame.java @@ -22,11 +22,12 @@ * * *
- * Video frame level annotation results for face detection.
+ * Deprecated. No effect.
  * 
* * Protobuf type {@code google.cloud.videointelligence.v1.FaceFrame} */ +@java.lang.Deprecated public final class FaceFrame extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.FaceFrame) @@ -462,7 +463,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * Video frame level annotation results for face detection.
+   * Deprecated. No effect.
    * 
* * Protobuf type {@code google.cloud.videointelligence.v1.FaceFrame} diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceFrameOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceFrameOrBuilder.java index d656a820a..1f6d37d7b 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceFrameOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceFrameOrBuilder.java @@ -18,6 +18,7 @@ package com.google.cloud.videointelligence.v1; +@java.lang.Deprecated public interface FaceFrameOrBuilder extends // @@protoc_insertion_point(interface_extends:google.cloud.videointelligence.v1.FaceFrame) diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Feature.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Feature.java index fb848f90b..cbab0641e 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Feature.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Feature.java @@ -72,7 +72,7 @@ public enum Feature implements com.google.protobuf.ProtocolMessageEnum { * * *
-   * Human face detection and tracking.
+   * Human face detection.
    * 
* * FACE_DETECTION = 4; @@ -118,6 +118,16 @@ public enum Feature implements com.google.protobuf.ProtocolMessageEnum { * LOGO_RECOGNITION = 12; */ LOGO_RECOGNITION(12), + /** + * + * + *
+   * Person detection.
+   * 
+ * + * PERSON_DETECTION = 14; + */ + PERSON_DETECTION(14), UNRECOGNIZED(-1), ; @@ -165,7 +175,7 @@ public enum Feature implements com.google.protobuf.ProtocolMessageEnum { * * *
-   * Human face detection and tracking.
+   * Human face detection.
    * 
* * FACE_DETECTION = 4; @@ -211,6 +221,16 @@ public enum Feature implements com.google.protobuf.ProtocolMessageEnum { * LOGO_RECOGNITION = 12; */ public static final int LOGO_RECOGNITION_VALUE = 12; + /** + * + * + *
+   * Person detection.
+   * 
+ * + * PERSON_DETECTION = 14; + */ + public static final int PERSON_DETECTION_VALUE = 14; public final int getNumber() { if (this == UNRECOGNIZED) { @@ -254,6 +274,8 @@ public static Feature forNumber(int value) { return OBJECT_TRACKING; case 12: return LOGO_RECOGNITION; + case 14: + return PERSON_DETECTION; default: return null; } diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelAnnotation.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelAnnotation.java index ff74e206d..157b0c61b 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelAnnotation.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelAnnotation.java @@ -41,6 +41,7 @@ private LabelAnnotation() { categoryEntities_ = java.util.Collections.emptyList(); segments_ = java.util.Collections.emptyList(); frames_ = java.util.Collections.emptyList(); + version_ = ""; } @java.lang.Override @@ -127,6 +128,13 @@ private LabelAnnotation( extensionRegistry)); break; } + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + + version_ = s; + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -225,9 +233,9 @@ public com.google.cloud.videointelligence.v1.EntityOrBuilder getEntityOrBuilder( * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -241,9 +249,9 @@ public java.util.List getCategoryE * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -258,9 +266,9 @@ public java.util.List getCategoryE * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -274,9 +282,9 @@ public int getCategoryEntitiesCount() { * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -290,9 +298,9 @@ public com.google.cloud.videointelligence.v1.Entity getCategoryEntities(int inde * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -442,6 +450,55 @@ public com.google.cloud.videointelligence.v1.LabelFrameOrBuilder getFramesOrBuil return frames_.get(index); } + public static final int VERSION_FIELD_NUMBER = 5; + private volatile java.lang.Object version_; + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 5; + * + * @return The version. + */ + @java.lang.Override + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } + } + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 5; + * + * @return The bytes for version. + */ + @java.lang.Override + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -468,6 +525,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io for (int i = 0; i < frames_.size(); i++) { output.writeMessage(4, frames_.get(i)); } + if (!getVersionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, version_); + } unknownFields.writeTo(output); } @@ -489,6 +549,9 @@ public int getSerializedSize() { for (int i = 0; i < frames_.size(); i++) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, frames_.get(i)); } + if (!getVersionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, version_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -512,6 +575,7 @@ public boolean equals(final java.lang.Object obj) { if (!getCategoryEntitiesList().equals(other.getCategoryEntitiesList())) return false; if (!getSegmentsList().equals(other.getSegmentsList())) return false; if (!getFramesList().equals(other.getFramesList())) return false; + if (!getVersion().equals(other.getVersion())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -539,6 +603,8 @@ public int hashCode() { hash = (37 * hash) + FRAMES_FIELD_NUMBER; hash = (53 * hash) + getFramesList().hashCode(); } + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -713,6 +779,8 @@ public Builder 
clear() { } else { framesBuilder_.clear(); } + version_ = ""; + return this; } @@ -773,6 +841,7 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation buildPartial() { } else { result.frames_ = framesBuilder_.build(); } + result.version_ = version_; onBuilt(); return result; } @@ -907,6 +976,10 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.LabelAnnotation o } } } + if (!other.getVersion().isEmpty()) { + version_ = other.version_; + onChanged(); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1147,9 +1220,9 @@ private void ensureCategoryEntitiesIsMutable() { * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1166,9 +1239,9 @@ public java.util.List getCategoryE * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1185,9 +1258,9 @@ public int getCategoryEntitiesCount() { * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1204,9 +1277,9 @@ public com.google.cloud.videointelligence.v1.Entity getCategoryEntities(int inde * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1230,9 +1303,9 @@ public Builder setCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1253,9 +1326,9 @@ public Builder setCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1278,9 +1351,9 @@ public Builder addCategoryEntities(com.google.cloud.videointelligence.v1.Entity * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1304,9 +1377,9 @@ public Builder addCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1327,9 +1400,9 @@ public Builder addCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1350,9 +1423,9 @@ public Builder addCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1373,9 +1446,9 @@ public Builder addAllCategoryEntities( * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1395,9 +1468,9 @@ public Builder clearCategoryEntities() { * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1417,9 +1490,9 @@ public Builder removeCategoryEntities(int index) { * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1433,9 +1506,9 @@ public com.google.cloud.videointelligence.v1.Entity.Builder getCategoryEntitiesB * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1453,9 +1526,9 @@ public com.google.cloud.videointelligence.v1.EntityOrBuilder getCategoryEntities * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1473,9 +1546,9 @@ public com.google.cloud.videointelligence.v1.EntityOrBuilder getCategoryEntities * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1489,9 +1562,9 @@ public com.google.cloud.videointelligence.v1.Entity.Builder addCategoryEntitiesB * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -1506,9 +1579,9 @@ public com.google.cloud.videointelligence.v1.Entity.Builder addCategoryEntitiesB * *
      * Common categories for the detected entity.
-     * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-     * cases there might be more than one categories e.g. `Terrier` could also be
-     * a `pet`.
+     * For example, when the label is `Terrier`, the category is likely `dog`. And
+     * in some cases there might be more than one category, e.g., `Terrier` could
+     * also be a `pet`.
      * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -2246,6 +2319,112 @@ public com.google.cloud.videointelligence.v1.LabelFrame.Builder addFramesBuilder return framesBuilder_; } + private java.lang.Object version_ = ""; + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 5; + * + * @return The version. + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 5; + * + * @return The bytes for version. + */ + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 5; + * + * @param value The version to set. + * @return This builder for chaining. + */ + public Builder setVersion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + version_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 5; + * + * @return This builder for chaining. + */ + public Builder clearVersion() { + + version_ = getDefaultInstance().getVersion(); + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 5; + * + * @param value The bytes for version to set. + * @return This builder for chaining. + */ + public Builder setVersionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + version_ = value; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelAnnotationOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelAnnotationOrBuilder.java index 6b3110b21..e25ebda5a 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelAnnotationOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelAnnotationOrBuilder.java @@ -63,9 +63,9 @@ public interface LabelAnnotationOrBuilder * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -76,9 +76,9 @@ public interface LabelAnnotationOrBuilder * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -89,9 +89,9 @@ public interface LabelAnnotationOrBuilder * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -102,9 +102,9 @@ public interface LabelAnnotationOrBuilder * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -116,9 +116,9 @@ public interface LabelAnnotationOrBuilder * *
    * Common categories for the detected entity.
-   * E.g. when the label is `Terrier` the category is likely `dog`. And in some
-   * cases there might be more than one categories e.g. `Terrier` could also be
-   * a `pet`.
+   * For example, when the label is `Terrier`, the category is likely `dog`. And
+   * in some cases there might be more than one category, e.g., `Terrier` could
+   * also be a `pet`.
    * 
* * repeated .google.cloud.videointelligence.v1.Entity category_entities = 2; @@ -228,4 +228,29 @@ public interface LabelAnnotationOrBuilder * repeated .google.cloud.videointelligence.v1.LabelFrame frames = 4; */ com.google.cloud.videointelligence.v1.LabelFrameOrBuilder getFramesOrBuilder(int index); + + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 5; + * + * @return The version. + */ + java.lang.String getVersion(); + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 5; + * + * @return The bytes for version. + */ + com.google.protobuf.ByteString getVersionBytes(); } diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelDetectionConfig.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelDetectionConfig.java index cdf9551d7..8962b0516 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelDetectionConfig.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelDetectionConfig.java @@ -182,9 +182,9 @@ public com.google.cloud.videointelligence.v1.LabelDetectionMode getLabelDetectio * * *
-   * Whether the video has been shot from a stationary (i.e. non-moving) camera.
-   * When set to true, might improve detection accuracy for moving objects.
-   * Should be used with `SHOT_AND_FRAME_MODE` enabled.
+   * Whether the video has been shot from a stationary (i.e., non-moving)
+   * camera. When set to true, might improve detection accuracy for moving
+   * objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
    * 
* * bool stationary_camera = 2; @@ -259,7 +259,7 @@ public com.google.protobuf.ByteString getModelBytes() { * frame-level detection. If not set, it is set to 0.4 by default. The valid * range for this threshold is [0.1, 0.9]. Any value set outside of this * range will be clipped. - * Note: for best results please follow the default threshold. We will update + * Note: For best results, follow the default threshold. We will update * the default threshold everytime when we release a new model. * * @@ -279,10 +279,10 @@ public float getFrameConfidenceThreshold() { * *
    * The confidence threshold we perform filtering on the labels from
-   * video-level and shot-level detections. If not set, it is set to 0.3 by
+   * video-level and shot-level detections. If not set, it's set to 0.3 by
    * default. The valid range for this threshold is [0.1, 0.9]. Any value set
    * outside of this range will be clipped.
-   * Note: for best results please follow the default threshold. We will update
+   * Note: For best results, follow the default threshold. We will update
    * the default threshold everytime when we release a new model.
    * 
* @@ -789,9 +789,9 @@ public Builder clearLabelDetectionMode() { * * *
-     * Whether the video has been shot from a stationary (i.e. non-moving) camera.
-     * When set to true, might improve detection accuracy for moving objects.
-     * Should be used with `SHOT_AND_FRAME_MODE` enabled.
+     * Whether the video has been shot from a stationary (i.e., non-moving)
+     * camera. When set to true, might improve detection accuracy for moving
+     * objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
      * 
* * bool stationary_camera = 2; @@ -806,9 +806,9 @@ public boolean getStationaryCamera() { * * *
-     * Whether the video has been shot from a stationary (i.e. non-moving) camera.
-     * When set to true, might improve detection accuracy for moving objects.
-     * Should be used with `SHOT_AND_FRAME_MODE` enabled.
+     * Whether the video has been shot from a stationary (i.e., non-moving)
+     * camera. When set to true, might improve detection accuracy for moving
+     * objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
      * 
* * bool stationary_camera = 2; @@ -826,9 +826,9 @@ public Builder setStationaryCamera(boolean value) { * * *
-     * Whether the video has been shot from a stationary (i.e. non-moving) camera.
-     * When set to true, might improve detection accuracy for moving objects.
-     * Should be used with `SHOT_AND_FRAME_MODE` enabled.
+     * Whether the video has been shot from a stationary (i.e., non-moving)
+     * camera. When set to true, might improve detection accuracy for moving
+     * objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
      * 
* * bool stationary_camera = 2; @@ -967,7 +967,7 @@ public Builder setModelBytes(com.google.protobuf.ByteString value) { * frame-level detection. If not set, it is set to 0.4 by default. The valid * range for this threshold is [0.1, 0.9]. Any value set outside of this * range will be clipped. - * Note: for best results please follow the default threshold. We will update + * Note: For best results, follow the default threshold. We will update * the default threshold everytime when we release a new model. * * @@ -987,7 +987,7 @@ public float getFrameConfidenceThreshold() { * frame-level detection. If not set, it is set to 0.4 by default. The valid * range for this threshold is [0.1, 0.9]. Any value set outside of this * range will be clipped. - * Note: for best results please follow the default threshold. We will update + * Note: For best results, follow the default threshold. We will update * the default threshold everytime when we release a new model. * * @@ -1010,7 +1010,7 @@ public Builder setFrameConfidenceThreshold(float value) { * frame-level detection. If not set, it is set to 0.4 by default. The valid * range for this threshold is [0.1, 0.9]. Any value set outside of this * range will be clipped. - * Note: for best results please follow the default threshold. We will update + * Note: For best results, follow the default threshold. We will update * the default threshold everytime when we release a new model. * * @@ -1031,10 +1031,10 @@ public Builder clearFrameConfidenceThreshold() { * *
      * The confidence threshold we perform filtering on the labels from
-     * video-level and shot-level detections. If not set, it is set to 0.3 by
+     * video-level and shot-level detections. If not set, it's set to 0.3 by
      * default. The valid range for this threshold is [0.1, 0.9]. Any value set
      * outside of this range will be clipped.
-     * Note: for best results please follow the default threshold. We will update
+     * Note: For best results, follow the default threshold. We will update
      * the default threshold everytime when we release a new model.
      * 
* @@ -1051,10 +1051,10 @@ public float getVideoConfidenceThreshold() { * *
      * The confidence threshold we perform filtering on the labels from
-     * video-level and shot-level detections. If not set, it is set to 0.3 by
+     * video-level and shot-level detections. If not set, it's set to 0.3 by
      * default. The valid range for this threshold is [0.1, 0.9]. Any value set
      * outside of this range will be clipped.
-     * Note: for best results please follow the default threshold. We will update
+     * Note: For best results, follow the default threshold. We will update
      * the default threshold everytime when we release a new model.
      * 
* @@ -1074,10 +1074,10 @@ public Builder setVideoConfidenceThreshold(float value) { * *
      * The confidence threshold we perform filtering on the labels from
-     * video-level and shot-level detections. If not set, it is set to 0.3 by
+     * video-level and shot-level detections. If not set, it's set to 0.3 by
      * default. The valid range for this threshold is [0.1, 0.9]. Any value set
      * outside of this range will be clipped.
-     * Note: for best results please follow the default threshold. We will update
+     * Note: For best results, follow the default threshold. We will update
      * the default threshold everytime when we release a new model.
      * 
* diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelDetectionConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelDetectionConfigOrBuilder.java index b1880743e..25870c79a 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelDetectionConfigOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/LabelDetectionConfigOrBuilder.java @@ -56,9 +56,9 @@ public interface LabelDetectionConfigOrBuilder * * *
-   * Whether the video has been shot from a stationary (i.e. non-moving) camera.
-   * When set to true, might improve detection accuracy for moving objects.
-   * Should be used with `SHOT_AND_FRAME_MODE` enabled.
+   * Whether the video has been shot from a stationary (i.e., non-moving)
+   * camera. When set to true, might improve detection accuracy for moving
+   * objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
    * 
* * bool stationary_camera = 2; @@ -104,7 +104,7 @@ public interface LabelDetectionConfigOrBuilder * frame-level detection. If not set, it is set to 0.4 by default. The valid * range for this threshold is [0.1, 0.9]. Any value set outside of this * range will be clipped. - * Note: for best results please follow the default threshold. We will update + * Note: For best results, follow the default threshold. We will update * the default threshold everytime when we release a new model. * * @@ -119,10 +119,10 @@ public interface LabelDetectionConfigOrBuilder * *
    * The confidence threshold we perform filtering on the labels from
-   * video-level and shot-level detections. If not set, it is set to 0.3 by
+   * video-level and shot-level detections. If not set, it's set to 0.3 by
    * default. The valid range for this threshold is [0.1, 0.9]. Any value set
    * outside of this range will be clipped.
-   * Note: for best results please follow the default threshold. We will update
+   * Note: For best results, follow the default threshold. We will update
    * the default threshold everytime when we release a new model.
    * 
* diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotation.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotation.java index 73f37ae71..1bcf9b0f1 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotation.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotation.java @@ -39,6 +39,7 @@ private ObjectTrackingAnnotation(com.google.protobuf.GeneratedMessageV3.Builder< private ObjectTrackingAnnotation() { frames_ = java.util.Collections.emptyList(); + version_ = ""; } @java.lang.Override @@ -131,6 +132,13 @@ private ObjectTrackingAnnotation( trackInfo_ = input.readInt64(); break; } + case 50: + { + java.lang.String s = input.readStringRequireUtf8(); + + version_ = s; + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -446,6 +454,55 @@ public com.google.cloud.videointelligence.v1.ObjectTrackingFrameOrBuilder getFra return frames_.get(index); } + public static final int VERSION_FIELD_NUMBER = 6; + private volatile java.lang.Object version_; + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 6; + * + * @return The version. + */ + @java.lang.Override + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } + } + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 6; + * + * @return The bytes for version. + */ + @java.lang.Override + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -475,6 +532,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (trackInfoCase_ == 5) { output.writeInt64(5, (long) ((java.lang.Long) trackInfo_)); } + if (!getVersionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, version_); + } unknownFields.writeTo(output); } @@ -503,6 +563,9 @@ public int getSerializedSize() { com.google.protobuf.CodedOutputStream.computeInt64Size( 5, (long) ((java.lang.Long) trackInfo_)); } + if (!getVersionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, version_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -526,6 +589,7 @@ public boolean equals(final java.lang.Object obj) { if (java.lang.Float.floatToIntBits(getConfidence()) != java.lang.Float.floatToIntBits(other.getConfidence())) return false; if (!getFramesList().equals(other.getFramesList())) return false; + if (!getVersion().equals(other.getVersion())) return false; if (!getTrackInfoCase().equals(other.getTrackInfoCase())) return false; switch (trackInfoCase_) { case 3: @@ -558,6 +622,8 @@ public int hashCode() { hash = (37 * hash) + FRAMES_FIELD_NUMBER; hash = (53 * hash) + getFramesList().hashCode(); } + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion().hashCode(); switch (trackInfoCase_) { case 3: hash = (37 * hash) + SEGMENT_FIELD_NUMBER; @@ -732,6 +798,8 @@ public Builder clear() { } else { 
framesBuilder_.clear(); } + version_ = ""; + trackInfoCase_ = 0; trackInfo_ = null; return this; @@ -788,6 +856,7 @@ public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation buildParti } else { result.frames_ = framesBuilder_.build(); } + result.version_ = version_; result.trackInfoCase_ = trackInfoCase_; onBuilt(); return result; @@ -873,6 +942,10 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.ObjectTrackingAnn } } } + if (!other.getVersion().isEmpty()) { + version_ = other.version_; + onChanged(); + } switch (other.getTrackInfoCase()) { case SEGMENT: { @@ -1881,6 +1954,112 @@ public com.google.cloud.videointelligence.v1.ObjectTrackingFrame.Builder addFram return framesBuilder_; } + private java.lang.Object version_ = ""; + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 6; + * + * @return The version. + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 6; + * + * @return The bytes for version. + */ + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 6; + * + * @param value The version to set. + * @return This builder for chaining. + */ + public Builder setVersion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + version_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 6; + * + * @return This builder for chaining. + */ + public Builder clearVersion() { + + version_ = getDefaultInstance().getVersion(); + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 6; + * + * @param value The bytes for version to set. + * @return This builder for chaining. + */ + public Builder setVersionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + version_ = value; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotationOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotationOrBuilder.java index ee8ccb2ba..aea584607 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotationOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotationOrBuilder.java @@ -194,6 +194,31 @@ public interface ObjectTrackingAnnotationOrBuilder */ com.google.cloud.videointelligence.v1.ObjectTrackingFrameOrBuilder getFramesOrBuilder(int index); + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 6; + * + * @return The version. + */ + java.lang.String getVersion(); + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 6; + * + * @return The bytes for version. + */ + com.google.protobuf.ByteString getVersionBytes(); + public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.TrackInfoCase getTrackInfoCase(); } diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionAnnotation.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionAnnotation.java new file mode 100644 index 000000000..a615e5892 --- /dev/null +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionAnnotation.java @@ -0,0 +1,1136 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/videointelligence/v1/video_intelligence.proto + +package com.google.cloud.videointelligence.v1; + +/** + * + * + *
+ * Person detection annotation per video.
+ * 
+ * + * Protobuf type {@code google.cloud.videointelligence.v1.PersonDetectionAnnotation} + */ +public final class PersonDetectionAnnotation extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.PersonDetectionAnnotation) + PersonDetectionAnnotationOrBuilder { + private static final long serialVersionUID = 0L; + // Use PersonDetectionAnnotation.newBuilder() to construct. + private PersonDetectionAnnotation(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private PersonDetectionAnnotation() { + tracks_ = java.util.Collections.emptyList(); + version_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new PersonDetectionAnnotation(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private PersonDetectionAnnotation( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + tracks_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + tracks_.add( + input.readMessage( + com.google.cloud.videointelligence.v1.Track.parser(), extensionRegistry)); + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + version_ = s; + break; + } + default: + { + if (!parseUnknownField(input, 
unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + tracks_ = java.util.Collections.unmodifiableList(tracks_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_PersonDetectionAnnotation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_PersonDetectionAnnotation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.class, + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder.class); + } + + public static final int TRACKS_FIELD_NUMBER = 1; + private java.util.List tracks_; + /** + * + * + *
+   * The detected tracks of a person.
+   * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + @java.lang.Override + public java.util.List getTracksList() { + return tracks_; + } + /** + * + * + *
+   * The detected tracks of a person.
+   * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + @java.lang.Override + public java.util.List + getTracksOrBuilderList() { + return tracks_; + } + /** + * + * + *
+   * The detected tracks of a person.
+   * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + @java.lang.Override + public int getTracksCount() { + return tracks_.size(); + } + /** + * + * + *
+   * The detected tracks of a person.
+   * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + @java.lang.Override + public com.google.cloud.videointelligence.v1.Track getTracks(int index) { + return tracks_.get(index); + } + /** + * + * + *
+   * The detected tracks of a person.
+   * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + @java.lang.Override + public com.google.cloud.videointelligence.v1.TrackOrBuilder getTracksOrBuilder(int index) { + return tracks_.get(index); + } + + public static final int VERSION_FIELD_NUMBER = 2; + private volatile java.lang.Object version_; + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 2; + * + * @return The version. + */ + @java.lang.Override + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } + } + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 2; + * + * @return The bytes for version. + */ + @java.lang.Override + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < tracks_.size(); i++) { + output.writeMessage(1, tracks_.get(i)); + } + if (!getVersionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, version_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tracks_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, tracks_.get(i)); + } + if (!getVersionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, version_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.videointelligence.v1.PersonDetectionAnnotation)) { + return super.equals(obj); + } + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation other = + (com.google.cloud.videointelligence.v1.PersonDetectionAnnotation) obj; + + if 
(!getTracksList().equals(other.getTracksList())) return false; + if (!getVersion().equals(other.getVersion())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getTracksCount() > 0) { + hash = (37 * hash) + TRACKS_FIELD_NUMBER; + hash = (53 * hash) + getTracksList().hashCode(); + } + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + 
PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Person detection annotation per video.
+   * 
+ * + * Protobuf type {@code google.cloud.videointelligence.v1.PersonDetectionAnnotation} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.videointelligence.v1.PersonDetectionAnnotation) + com.google.cloud.videointelligence.v1.PersonDetectionAnnotationOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_PersonDetectionAnnotation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_PersonDetectionAnnotation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.class, + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder.class); + } + + // Construct using com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getTracksFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (tracksBuilder_ == null) { + tracks_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tracksBuilder_.clear(); + } + version_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_PersonDetectionAnnotation_descriptor; + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotation + getDefaultInstanceForType() { + return com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotation build() { + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotation buildPartial() { + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation result = + new com.google.cloud.videointelligence.v1.PersonDetectionAnnotation(this); + int from_bitField0_ = bitField0_; + if (tracksBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + tracks_ = java.util.Collections.unmodifiableList(tracks_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tracks_ = tracks_; + } else { + result.tracks_ = tracksBuilder_.build(); + } + result.version_ = version_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, 
int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.videointelligence.v1.PersonDetectionAnnotation) { + return mergeFrom((com.google.cloud.videointelligence.v1.PersonDetectionAnnotation) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation other) { + if (other + == com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.getDefaultInstance()) + return this; + if (tracksBuilder_ == null) { + if (!other.tracks_.isEmpty()) { + if (tracks_.isEmpty()) { + tracks_ = other.tracks_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTracksIsMutable(); + tracks_.addAll(other.tracks_); + } + onChanged(); + } + } else { + if (!other.tracks_.isEmpty()) { + if (tracksBuilder_.isEmpty()) { + tracksBuilder_.dispose(); + tracksBuilder_ = null; + tracks_ = other.tracks_; + bitField0_ = (bitField0_ & ~0x00000001); + tracksBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getTracksFieldBuilder() + : null; + } else { + tracksBuilder_.addAllMessages(other.tracks_); + } + } + } + if (!other.getVersion().isEmpty()) { + version_ = other.version_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.videointelligence.v1.PersonDetectionAnnotation) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.util.List tracks_ = + java.util.Collections.emptyList(); + + private void ensureTracksIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + tracks_ = new java.util.ArrayList(tracks_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1.Track, + com.google.cloud.videointelligence.v1.Track.Builder, + com.google.cloud.videointelligence.v1.TrackOrBuilder> + tracksBuilder_; + + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public java.util.List getTracksList() { + if (tracksBuilder_ == null) { + return java.util.Collections.unmodifiableList(tracks_); + } else { + return tracksBuilder_.getMessageList(); + } + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public int getTracksCount() { + if (tracksBuilder_ == null) { + return tracks_.size(); + } else { + return tracksBuilder_.getCount(); + } + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public com.google.cloud.videointelligence.v1.Track getTracks(int index) { + if (tracksBuilder_ == null) { + return tracks_.get(index); + } else { + return tracksBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public Builder setTracks(int index, com.google.cloud.videointelligence.v1.Track value) { + if (tracksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTracksIsMutable(); + tracks_.set(index, value); + onChanged(); + } else { + tracksBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public Builder setTracks( + int index, com.google.cloud.videointelligence.v1.Track.Builder builderForValue) { + if (tracksBuilder_ == null) { + ensureTracksIsMutable(); + tracks_.set(index, builderForValue.build()); + onChanged(); + } else { + tracksBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public Builder addTracks(com.google.cloud.videointelligence.v1.Track value) { + if (tracksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTracksIsMutable(); + tracks_.add(value); + onChanged(); + } else { + tracksBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public Builder addTracks(int index, com.google.cloud.videointelligence.v1.Track value) { + if (tracksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTracksIsMutable(); + tracks_.add(index, value); + onChanged(); + } else { + tracksBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public Builder addTracks(com.google.cloud.videointelligence.v1.Track.Builder builderForValue) { + if (tracksBuilder_ == null) { + ensureTracksIsMutable(); + tracks_.add(builderForValue.build()); + onChanged(); + } else { + tracksBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public Builder addTracks( + int index, com.google.cloud.videointelligence.v1.Track.Builder builderForValue) { + if (tracksBuilder_ == null) { + ensureTracksIsMutable(); + tracks_.add(index, builderForValue.build()); + onChanged(); + } else { + tracksBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public Builder addAllTracks( + java.lang.Iterable values) { + if (tracksBuilder_ == null) { + ensureTracksIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, tracks_); + onChanged(); + } else { + tracksBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public Builder clearTracks() { + if (tracksBuilder_ == null) { + tracks_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tracksBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public Builder removeTracks(int index) { + if (tracksBuilder_ == null) { + ensureTracksIsMutable(); + tracks_.remove(index); + onChanged(); + } else { + tracksBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public com.google.cloud.videointelligence.v1.Track.Builder getTracksBuilder(int index) { + return getTracksFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public com.google.cloud.videointelligence.v1.TrackOrBuilder getTracksOrBuilder(int index) { + if (tracksBuilder_ == null) { + return tracks_.get(index); + } else { + return tracksBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public java.util.List + getTracksOrBuilderList() { + if (tracksBuilder_ != null) { + return tracksBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tracks_); + } + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public com.google.cloud.videointelligence.v1.Track.Builder addTracksBuilder() { + return getTracksFieldBuilder() + .addBuilder(com.google.cloud.videointelligence.v1.Track.getDefaultInstance()); + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public com.google.cloud.videointelligence.v1.Track.Builder addTracksBuilder(int index) { + return getTracksFieldBuilder() + .addBuilder(index, com.google.cloud.videointelligence.v1.Track.getDefaultInstance()); + } + /** + * + * + *
+     * The detected tracks of a person.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + public java.util.List + getTracksBuilderList() { + return getTracksFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1.Track, + com.google.cloud.videointelligence.v1.Track.Builder, + com.google.cloud.videointelligence.v1.TrackOrBuilder> + getTracksFieldBuilder() { + if (tracksBuilder_ == null) { + tracksBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1.Track, + com.google.cloud.videointelligence.v1.Track.Builder, + com.google.cloud.videointelligence.v1.TrackOrBuilder>( + tracks_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + tracks_ = null; + } + return tracksBuilder_; + } + + private java.lang.Object version_ = ""; + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 2; + * + * @return The version. + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 2; + * + * @return The bytes for version. + */ + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 2; + * + * @param value The version to set. + * @return This builder for chaining. + */ + public Builder setVersion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + version_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 2; + * + * @return This builder for chaining. + */ + public Builder clearVersion() { + + version_ = getDefaultInstance().getVersion(); + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 2; + * + * @param value The bytes for version to set. + * @return This builder for chaining. + */ + public Builder setVersionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + version_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.videointelligence.v1.PersonDetectionAnnotation) + } + + // @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.PersonDetectionAnnotation) + private static final com.google.cloud.videointelligence.v1.PersonDetectionAnnotation + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.videointelligence.v1.PersonDetectionAnnotation(); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionAnnotation + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PersonDetectionAnnotation parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PersonDetectionAnnotation(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotation + 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionAnnotationOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionAnnotationOrBuilder.java new file mode 100644 index 000000000..340f63beb --- /dev/null +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionAnnotationOrBuilder.java @@ -0,0 +1,102 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/videointelligence/v1/video_intelligence.proto + +package com.google.cloud.videointelligence.v1; + +public interface PersonDetectionAnnotationOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.videointelligence.v1.PersonDetectionAnnotation) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The detected tracks of a person.
+   * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + java.util.List getTracksList(); + /** + * + * + *
+   * The detected tracks of a person.
+   * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + com.google.cloud.videointelligence.v1.Track getTracks(int index); + /** + * + * + *
+   * The detected tracks of a person.
+   * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + int getTracksCount(); + /** + * + * + *
+   * The detected tracks of a person.
+   * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + java.util.List + getTracksOrBuilderList(); + /** + * + * + *
+   * The detected tracks of a person.
+   * 
+ * + * repeated .google.cloud.videointelligence.v1.Track tracks = 1; + */ + com.google.cloud.videointelligence.v1.TrackOrBuilder getTracksOrBuilder(int index); + + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 2; + * + * @return The version. + */ + java.lang.String getVersion(); + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 2; + * + * @return The bytes for version. + */ + com.google.protobuf.ByteString getVersionBytes(); +} diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionConfig.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionConfig.java new file mode 100644 index 000000000..1bf9be818 --- /dev/null +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionConfig.java @@ -0,0 +1,747 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/videointelligence/v1/video_intelligence.proto + +package com.google.cloud.videointelligence.v1; + +/** + * + * + *
+ * Config for PERSON_DETECTION.
+ * 
+ * + * Protobuf type {@code google.cloud.videointelligence.v1.PersonDetectionConfig} + */ +public final class PersonDetectionConfig extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.PersonDetectionConfig) + PersonDetectionConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use PersonDetectionConfig.newBuilder() to construct. + private PersonDetectionConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private PersonDetectionConfig() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new PersonDetectionConfig(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private PersonDetectionConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + includeBoundingBoxes_ = input.readBool(); + break; + } + case 16: + { + includePoseLandmarks_ = input.readBool(); + break; + } + case 24: + { + includeAttributes_ = input.readBool(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_PersonDetectionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_PersonDetectionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.PersonDetectionConfig.class, + com.google.cloud.videointelligence.v1.PersonDetectionConfig.Builder.class); + } + + public static final int INCLUDE_BOUNDING_BOXES_FIELD_NUMBER = 1; + private boolean includeBoundingBoxes_; + /** + * + * + *
+   * Whether bounding boxes are included in the person detection annotation
+   * output.
+   * 
+ * + * bool include_bounding_boxes = 1; + * + * @return The includeBoundingBoxes. + */ + @java.lang.Override + public boolean getIncludeBoundingBoxes() { + return includeBoundingBoxes_; + } + + public static final int INCLUDE_POSE_LANDMARKS_FIELD_NUMBER = 2; + private boolean includePoseLandmarks_; + /** + * + * + *
+   * Whether to enable pose landmarks detection. Ignored if
+   * 'include_bounding_boxes' is set to false.
+   * 
+ * + * bool include_pose_landmarks = 2; + * + * @return The includePoseLandmarks. + */ + @java.lang.Override + public boolean getIncludePoseLandmarks() { + return includePoseLandmarks_; + } + + public static final int INCLUDE_ATTRIBUTES_FIELD_NUMBER = 3; + private boolean includeAttributes_; + /** + * + * + *
+   * Whether to enable person attributes detection, such as cloth color (black,
+   * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+   * etc.
+   * Ignored if 'include_bounding_boxes' is set to false.
+   * 
+ * + * bool include_attributes = 3; + * + * @return The includeAttributes. + */ + @java.lang.Override + public boolean getIncludeAttributes() { + return includeAttributes_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (includeBoundingBoxes_ != false) { + output.writeBool(1, includeBoundingBoxes_); + } + if (includePoseLandmarks_ != false) { + output.writeBool(2, includePoseLandmarks_); + } + if (includeAttributes_ != false) { + output.writeBool(3, includeAttributes_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (includeBoundingBoxes_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, includeBoundingBoxes_); + } + if (includePoseLandmarks_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, includePoseLandmarks_); + } + if (includeAttributes_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, includeAttributes_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.videointelligence.v1.PersonDetectionConfig)) { + return super.equals(obj); + } + com.google.cloud.videointelligence.v1.PersonDetectionConfig other = + (com.google.cloud.videointelligence.v1.PersonDetectionConfig) obj; + + if (getIncludeBoundingBoxes() != other.getIncludeBoundingBoxes()) return false; + if (getIncludePoseLandmarks() != 
other.getIncludePoseLandmarks()) return false; + if (getIncludeAttributes() != other.getIncludeAttributes()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INCLUDE_BOUNDING_BOXES_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIncludeBoundingBoxes()); + hash = (37 * hash) + INCLUDE_POSE_LANDMARKS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIncludePoseLandmarks()); + hash = (37 * hash) + INCLUDE_ATTRIBUTES_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIncludeAttributes()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.videointelligence.v1.PersonDetectionConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig parseFrom( + com.google.protobuf.CodedInputStream 
input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.videointelligence.v1.PersonDetectionConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Config for PERSON_DETECTION.
+   * 
+ * + * Protobuf type {@code google.cloud.videointelligence.v1.PersonDetectionConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.videointelligence.v1.PersonDetectionConfig) + com.google.cloud.videointelligence.v1.PersonDetectionConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_PersonDetectionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_PersonDetectionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.PersonDetectionConfig.class, + com.google.cloud.videointelligence.v1.PersonDetectionConfig.Builder.class); + } + + // Construct using com.google.cloud.videointelligence.v1.PersonDetectionConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + includeBoundingBoxes_ = false; + + includePoseLandmarks_ = false; + + includeAttributes_ = false; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_PersonDetectionConfig_descriptor; + } 
+ + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionConfig getDefaultInstanceForType() { + return com.google.cloud.videointelligence.v1.PersonDetectionConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionConfig build() { + com.google.cloud.videointelligence.v1.PersonDetectionConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionConfig buildPartial() { + com.google.cloud.videointelligence.v1.PersonDetectionConfig result = + new com.google.cloud.videointelligence.v1.PersonDetectionConfig(this); + result.includeBoundingBoxes_ = includeBoundingBoxes_; + result.includePoseLandmarks_ = includePoseLandmarks_; + result.includeAttributes_ = includeAttributes_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message 
other) { + if (other instanceof com.google.cloud.videointelligence.v1.PersonDetectionConfig) { + return mergeFrom((com.google.cloud.videointelligence.v1.PersonDetectionConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.videointelligence.v1.PersonDetectionConfig other) { + if (other == com.google.cloud.videointelligence.v1.PersonDetectionConfig.getDefaultInstance()) + return this; + if (other.getIncludeBoundingBoxes() != false) { + setIncludeBoundingBoxes(other.getIncludeBoundingBoxes()); + } + if (other.getIncludePoseLandmarks() != false) { + setIncludePoseLandmarks(other.getIncludePoseLandmarks()); + } + if (other.getIncludeAttributes() != false) { + setIncludeAttributes(other.getIncludeAttributes()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.videointelligence.v1.PersonDetectionConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.videointelligence.v1.PersonDetectionConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private boolean includeBoundingBoxes_; + /** + * + * + *
+     * Whether bounding boxes are included in the person detection annotation
+     * output.
+     * 
+ * + * bool include_bounding_boxes = 1; + * + * @return The includeBoundingBoxes. + */ + @java.lang.Override + public boolean getIncludeBoundingBoxes() { + return includeBoundingBoxes_; + } + /** + * + * + *
+     * Whether bounding boxes are included in the person detection annotation
+     * output.
+     * 
+ * + * bool include_bounding_boxes = 1; + * + * @param value The includeBoundingBoxes to set. + * @return This builder for chaining. + */ + public Builder setIncludeBoundingBoxes(boolean value) { + + includeBoundingBoxes_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Whether bounding boxes are included in the person detection annotation
+     * output.
+     * 
+ * + * bool include_bounding_boxes = 1; + * + * @return This builder for chaining. + */ + public Builder clearIncludeBoundingBoxes() { + + includeBoundingBoxes_ = false; + onChanged(); + return this; + } + + private boolean includePoseLandmarks_; + /** + * + * + *
+     * Whether to enable pose landmarks detection. Ignored if
+     * 'include_bounding_boxes' is set to false.
+     * 
+ * + * bool include_pose_landmarks = 2; + * + * @return The includePoseLandmarks. + */ + @java.lang.Override + public boolean getIncludePoseLandmarks() { + return includePoseLandmarks_; + } + /** + * + * + *
+     * Whether to enable pose landmarks detection. Ignored if
+     * 'include_bounding_boxes' is set to false.
+     * 
+ * + * bool include_pose_landmarks = 2; + * + * @param value The includePoseLandmarks to set. + * @return This builder for chaining. + */ + public Builder setIncludePoseLandmarks(boolean value) { + + includePoseLandmarks_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Whether to enable pose landmarks detection. Ignored if
+     * 'include_bounding_boxes' is set to false.
+     * 
+ * + * bool include_pose_landmarks = 2; + * + * @return This builder for chaining. + */ + public Builder clearIncludePoseLandmarks() { + + includePoseLandmarks_ = false; + onChanged(); + return this; + } + + private boolean includeAttributes_; + /** + * + * + *
+     * Whether to enable person attributes detection, such as cloth color (black,
+     * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+     * etc.
+     * Ignored if 'include_bounding_boxes' is set to false.
+     * 
+ * + * bool include_attributes = 3; + * + * @return The includeAttributes. + */ + @java.lang.Override + public boolean getIncludeAttributes() { + return includeAttributes_; + } + /** + * + * + *
+     * Whether to enable person attributes detection, such as cloth color (black,
+     * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+     * etc.
+     * Ignored if 'include_bounding_boxes' is set to false.
+     * 
+ * + * bool include_attributes = 3; + * + * @param value The includeAttributes to set. + * @return This builder for chaining. + */ + public Builder setIncludeAttributes(boolean value) { + + includeAttributes_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Whether to enable person attributes detection, such as cloth color (black,
+     * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+     * etc.
+     * Ignored if 'include_bounding_boxes' is set to false.
+     * 
+ * + * bool include_attributes = 3; + * + * @return This builder for chaining. + */ + public Builder clearIncludeAttributes() { + + includeAttributes_ = false; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.videointelligence.v1.PersonDetectionConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.PersonDetectionConfig) + private static final com.google.cloud.videointelligence.v1.PersonDetectionConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.videointelligence.v1.PersonDetectionConfig(); + } + + public static com.google.cloud.videointelligence.v1.PersonDetectionConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PersonDetectionConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PersonDetectionConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionConfigOrBuilder.java 
b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionConfigOrBuilder.java new file mode 100644 index 000000000..de9e56561 --- /dev/null +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionConfigOrBuilder.java @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/videointelligence/v1/video_intelligence.proto + +package com.google.cloud.videointelligence.v1; + +public interface PersonDetectionConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.videointelligence.v1.PersonDetectionConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Whether bounding boxes are included in the person detection annotation
+   * output.
+   * 
+ * + * bool include_bounding_boxes = 1; + * + * @return The includeBoundingBoxes. + */ + boolean getIncludeBoundingBoxes(); + + /** + * + * + *
+   * Whether to enable pose landmarks detection. Ignored if
+   * 'include_bounding_boxes' is set to false.
+   * 
+ * + * bool include_pose_landmarks = 2; + * + * @return The includePoseLandmarks. + */ + boolean getIncludePoseLandmarks(); + + /** + * + * + *
+   * Whether to enable person attributes detection, such as cloth color (black,
+   * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+   * etc.
+   * Ignored if 'include_bounding_boxes' is set to false.
+   * 
+ * + * bool include_attributes = 3; + * + * @return The includeAttributes. + */ + boolean getIncludeAttributes(); +} diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechRecognitionAlternative.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechRecognitionAlternative.java index f56285cb9..49b1ecfcc 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechRecognitionAlternative.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechRecognitionAlternative.java @@ -211,8 +211,8 @@ public float getConfidence() { * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -228,8 +228,8 @@ public java.util.List getWordsLi * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -246,8 +246,8 @@ public java.util.List getWordsLi * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -263,8 +263,8 @@ public int getWordsCount() { * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -280,8 +280,8 @@ public com.google.cloud.videointelligence.v1.WordInfo getWords(int index) { * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -888,8 +888,8 @@ private void ensureWordsIsMutable() { * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -908,8 +908,8 @@ public java.util.List getWordsLi * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -928,8 +928,8 @@ public int getWordsCount() { * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -948,8 +948,8 @@ public com.google.cloud.videointelligence.v1.WordInfo getWords(int index) { * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -974,8 +974,8 @@ public Builder setWords(int index, com.google.cloud.videointelligence.v1.WordInf * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -998,8 +998,8 @@ public Builder setWords( * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1024,8 +1024,8 @@ public Builder addWords(com.google.cloud.videointelligence.v1.WordInfo value) { * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1050,8 +1050,8 @@ public Builder addWords(int index, com.google.cloud.videointelligence.v1.WordInf * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1074,8 +1074,8 @@ public Builder addWords( * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1098,8 +1098,8 @@ public Builder addWords( * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1122,8 +1122,8 @@ public Builder addAllWords( * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1145,8 +1145,8 @@ public Builder clearWords() { * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1168,8 +1168,8 @@ public Builder removeWords(int index) { * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1184,8 +1184,8 @@ public com.google.cloud.videointelligence.v1.WordInfo.Builder getWordsBuilder(in * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1204,8 +1204,8 @@ public com.google.cloud.videointelligence.v1.WordInfoOrBuilder getWordsOrBuilder * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1225,8 +1225,8 @@ public com.google.cloud.videointelligence.v1.WordInfoOrBuilder getWordsOrBuilder * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1242,8 +1242,8 @@ public com.google.cloud.videointelligence.v1.WordInfo.Builder addWordsBuilder() * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * @@ -1259,8 +1259,8 @@ public com.google.cloud.videointelligence.v1.WordInfo.Builder addWordsBuilder(in * *
      * Output only. A list of word-specific information for each recognized word.
-     * Note: When `enable_speaker_diarization` is true, you will see all the words
-     * from the beginning of the audio.
+     * Note: When `enable_speaker_diarization` is set to true, you will see all
+     * the words from the beginning of the audio.
      * 
* * diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechRecognitionAlternativeOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechRecognitionAlternativeOrBuilder.java index b301a9414..a55fb0a48 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechRecognitionAlternativeOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechRecognitionAlternativeOrBuilder.java @@ -71,8 +71,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -85,8 +85,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -99,8 +99,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -113,8 +113,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * @@ -128,8 +128,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
-   * Note: When `enable_speaker_diarization` is true, you will see all the words
-   * from the beginning of the audio.
+   * Note: When `enable_speaker_diarization` is set to true, you will see all
+   * the words from the beginning of the audio.
    * 
* * diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscription.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscription.java index 7ac56a723..c2aa36943 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscription.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscription.java @@ -235,9 +235,9 @@ public com.google.cloud.videointelligence.v1.SpeechRecognitionAlternative getAlt * * *
-   * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
-   * the language in this result. This language code was detected to have the
-   * most likelihood of being spoken in the audio.
+   * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+   * language tag of the language in this result. This language code was
+   * detected to have the highest likelihood of being spoken in the audio.
    * 
* * string language_code = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -260,9 +260,9 @@ public java.lang.String getLanguageCode() { * * *
-   * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
-   * the language in this result. This language code was detected to have the
-   * most likelihood of being spoken in the audio.
+   * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+   * language tag of the language in this result. This language code was
+   * detected to have the highest likelihood of being spoken in the audio.
    * 
* * string language_code = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1129,9 +1129,9 @@ public Builder removeAlternatives(int index) { * * *
-     * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
-     * the language in this result. This language code was detected to have the
-     * most likelihood of being spoken in the audio.
+     * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+     * language tag of the language in this result. This language code was
+     * detected to have the highest likelihood of being spoken in the audio.
      * 
* * string language_code = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1153,9 +1153,9 @@ public java.lang.String getLanguageCode() { * * *
-     * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
-     * the language in this result. This language code was detected to have the
-     * most likelihood of being spoken in the audio.
+     * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+     * language tag of the language in this result. This language code was
+     * detected to have the highest likelihood of being spoken in the audio.
      * 
* * string language_code = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1177,9 +1177,9 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() { * * *
-     * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
-     * the language in this result. This language code was detected to have the
-     * most likelihood of being spoken in the audio.
+     * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+     * language tag of the language in this result. This language code was
+     * detected to have the highest likelihood of being spoken in the audio.
      * 
* * string language_code = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1200,9 +1200,9 @@ public Builder setLanguageCode(java.lang.String value) { * * *
-     * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
-     * the language in this result. This language code was detected to have the
-     * most likelihood of being spoken in the audio.
+     * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+     * language tag of the language in this result. This language code was
+     * detected to have the highest likelihood of being spoken in the audio.
      * 
* * string language_code = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1219,9 +1219,9 @@ public Builder clearLanguageCode() { * * *
-     * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
-     * the language in this result. This language code was detected to have the
-     * most likelihood of being spoken in the audio.
+     * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+     * language tag of the language in this result. This language code was
+     * detected to have the highest likelihood of being spoken in the audio.
      * 
* * string language_code = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionConfig.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionConfig.java index 032def3f9..9b7dc587c 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionConfig.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionConfig.java @@ -452,7 +452,7 @@ public int getAudioTracks(int index) { * the top alternative of the recognition result using a speaker_tag provided * in the WordInfo. * Note: When this is true, we send all the words from the beginning of the - * audio for the top alternative in every consecutive responses. + * audio for the top alternative in every consecutive response. * This is done in order to improve our speaker tags as our models learn to * identify the speakers in the conversation over time. * @@ -472,9 +472,9 @@ public boolean getEnableSpeakerDiarization() { * * *
-   * Optional. If set, specifies the estimated number of speakers in the conversation.
-   * If not set, defaults to '2'.
-   * Ignored unless enable_speaker_diarization is set to true.
+   * Optional. If set, specifies the estimated number of speakers in the
+   * conversation. If not set, defaults to '2'. Ignored unless
+   * enable_speaker_diarization is set to true.
    * 
* * int32 diarization_speaker_count = 8 [(.google.api.field_behavior) = OPTIONAL]; @@ -1878,7 +1878,7 @@ public Builder clearAudioTracks() { * the top alternative of the recognition result using a speaker_tag provided * in the WordInfo. * Note: When this is true, we send all the words from the beginning of the - * audio for the top alternative in every consecutive responses. + * audio for the top alternative in every consecutive response. * This is done in order to improve our speaker tags as our models learn to * identify the speakers in the conversation over time. * @@ -1899,7 +1899,7 @@ public boolean getEnableSpeakerDiarization() { * the top alternative of the recognition result using a speaker_tag provided * in the WordInfo. * Note: When this is true, we send all the words from the beginning of the - * audio for the top alternative in every consecutive responses. + * audio for the top alternative in every consecutive response. * This is done in order to improve our speaker tags as our models learn to * identify the speakers in the conversation over time. * @@ -1923,7 +1923,7 @@ public Builder setEnableSpeakerDiarization(boolean value) { * the top alternative of the recognition result using a speaker_tag provided * in the WordInfo. * Note: When this is true, we send all the words from the beginning of the - * audio for the top alternative in every consecutive responses. + * audio for the top alternative in every consecutive response. * This is done in order to improve our speaker tags as our models learn to * identify the speakers in the conversation over time. * @@ -1944,9 +1944,9 @@ public Builder clearEnableSpeakerDiarization() { * * *
-     * Optional. If set, specifies the estimated number of speakers in the conversation.
-     * If not set, defaults to '2'.
-     * Ignored unless enable_speaker_diarization is set to true.
+     * Optional. If set, specifies the estimated number of speakers in the
+     * conversation. If not set, defaults to '2'. Ignored unless
+     * enable_speaker_diarization is set to true.
      * 
* * int32 diarization_speaker_count = 8 [(.google.api.field_behavior) = OPTIONAL]; @@ -1961,9 +1961,9 @@ public int getDiarizationSpeakerCount() { * * *
-     * Optional. If set, specifies the estimated number of speakers in the conversation.
-     * If not set, defaults to '2'.
-     * Ignored unless enable_speaker_diarization is set to true.
+     * Optional. If set, specifies the estimated number of speakers in the
+     * conversation. If not set, defaults to '2'. Ignored unless
+     * enable_speaker_diarization is set to true.
      * 
* * int32 diarization_speaker_count = 8 [(.google.api.field_behavior) = OPTIONAL]; @@ -1981,9 +1981,9 @@ public Builder setDiarizationSpeakerCount(int value) { * * *
-     * Optional. If set, specifies the estimated number of speakers in the conversation.
-     * If not set, defaults to '2'.
-     * Ignored unless enable_speaker_diarization is set to true.
+     * Optional. If set, specifies the estimated number of speakers in the
+     * conversation. If not set, defaults to '2'. Ignored unless
+     * enable_speaker_diarization is set to true.
      * 
* * int32 diarization_speaker_count = 8 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionConfigOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionConfigOrBuilder.java index 7ee35e28f..ca13e773e 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionConfigOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionConfigOrBuilder.java @@ -219,7 +219,7 @@ com.google.cloud.videointelligence.v1.SpeechContextOrBuilder getSpeechContextsOr * the top alternative of the recognition result using a speaker_tag provided * in the WordInfo. * Note: When this is true, we send all the words from the beginning of the - * audio for the top alternative in every consecutive responses. + * audio for the top alternative in every consecutive response. * This is done in order to improve our speaker tags as our models learn to * identify the speakers in the conversation over time. * @@ -234,9 +234,9 @@ com.google.cloud.videointelligence.v1.SpeechContextOrBuilder getSpeechContextsOr * * *
-   * Optional. If set, specifies the estimated number of speakers in the conversation.
-   * If not set, defaults to '2'.
-   * Ignored unless enable_speaker_diarization is set to true.
+   * Optional. If set, specifies the estimated number of speakers in the
+   * conversation. If not set, defaults to '2'. Ignored unless
+   * enable_speaker_diarization is set to true.
    * 
* * int32 diarization_speaker_count = 8 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionOrBuilder.java index fe396c291..70d2da823 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechTranscriptionOrBuilder.java @@ -107,9 +107,9 @@ public interface SpeechTranscriptionOrBuilder * * *
-   * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
-   * the language in this result. This language code was detected to have the
-   * most likelihood of being spoken in the audio.
+   * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+   * language tag of the language in this result. This language code was
+   * detected to have the highest likelihood of being spoken in the audio.
    * 
* * string language_code = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -121,9 +121,9 @@ public interface SpeechTranscriptionOrBuilder * * *
-   * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
-   * the language in this result. This language code was detected to have the
-   * most likelihood of being spoken in the audio.
+   * Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+   * language tag of the language in this result. This language code was
+   * detected to have the highest likelihood of being spoken in the audio.
    * 
* * string language_code = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotation.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotation.java index 564c5959f..e229c5355 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotation.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotation.java @@ -42,6 +42,7 @@ private TextAnnotation(com.google.protobuf.GeneratedMessageV3.Builder builder private TextAnnotation() { text_ = ""; segments_ = java.util.Collections.emptyList(); + version_ = ""; } @java.lang.Override @@ -94,6 +95,13 @@ private TextAnnotation( extensionRegistry)); break; } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + + version_ = s; + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -250,6 +258,55 @@ public com.google.cloud.videointelligence.v1.TextSegmentOrBuilder getSegmentsOrB return segments_.get(index); } + public static final int VERSION_FIELD_NUMBER = 3; + private volatile java.lang.Object version_; + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 3; + * + * @return The version. + */ + @java.lang.Override + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } + } + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 3; + * + * @return The bytes for version. + */ + @java.lang.Override + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -270,6 +327,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io for (int i = 0; i < segments_.size(); i++) { output.writeMessage(2, segments_.get(i)); } + if (!getVersionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, version_); + } unknownFields.writeTo(output); } @@ -285,6 +345,9 @@ public int getSerializedSize() { for (int i = 0; i < segments_.size(); i++) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, segments_.get(i)); } + if (!getVersionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, version_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -303,6 +366,7 @@ public boolean equals(final java.lang.Object obj) { if (!getText().equals(other.getText())) return false; if (!getSegmentsList().equals(other.getSegmentsList())) return false; + if (!getVersion().equals(other.getVersion())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -320,6 +384,8 @@ public int hashCode() { hash = (37 * hash) + SEGMENTS_FIELD_NUMBER; hash = (53 * hash) + getSegmentsList().hashCode(); } + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -477,6 +543,8 @@ public Builder clear() { } else { segmentsBuilder_.clear(); } + version_ = ""; + return this; } @@ 
-515,6 +583,7 @@ public com.google.cloud.videointelligence.v1.TextAnnotation buildPartial() { } else { result.segments_ = segmentsBuilder_.build(); } + result.version_ = version_; onBuilt(); return result; } @@ -596,6 +665,10 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.TextAnnotation ot } } } + if (!other.getVersion().isEmpty()) { + version_ = other.version_; + onChanged(); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1087,6 +1160,112 @@ public com.google.cloud.videointelligence.v1.TextSegment.Builder addSegmentsBuil return segmentsBuilder_; } + private java.lang.Object version_ = ""; + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 3; + * + * @return The version. + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + version_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 3; + * + * @return The bytes for version. + */ + public com.google.protobuf.ByteString getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 3; + * + * @param value The version to set. + * @return This builder for chaining. + */ + public Builder setVersion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + version_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 3; + * + * @return This builder for chaining. + */ + public Builder clearVersion() { + + version_ = getDefaultInstance().getVersion(); + onChanged(); + return this; + } + /** + * + * + *
+     * Feature version.
+     * 
+ * + * string version = 3; + * + * @param value The bytes for version to set. + * @return This builder for chaining. + */ + public Builder setVersionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + version_ = value; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotationOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotationOrBuilder.java index 823fda9f7..64859fa22 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotationOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotationOrBuilder.java @@ -99,4 +99,29 @@ public interface TextAnnotationOrBuilder * repeated .google.cloud.videointelligence.v1.TextSegment segments = 2; */ com.google.cloud.videointelligence.v1.TextSegmentOrBuilder getSegmentsOrBuilder(int index); + + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 3; + * + * @return The version. + */ + java.lang.String getVersion(); + /** + * + * + *
+   * Feature version.
+   * 
+ * + * string version = 3; + * + * @return The bytes for version. + */ + com.google.protobuf.ByteString getVersionBytes(); } diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationProgress.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationProgress.java index 87468227e..049c3c5b8 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationProgress.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationProgress.java @@ -178,7 +178,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -202,7 +202,7 @@ public java.lang.String getInputUri() { * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -340,7 +340,7 @@ public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { * *
    * Specifies which feature is being tracked if the request contains more than
-   * one features.
+   * one feature.
    * 
* * .google.cloud.videointelligence.v1.Feature feature = 5; @@ -356,7 +356,7 @@ public int getFeatureValue() { * *
    * Specifies which feature is being tracked if the request contains more than
-   * one features.
+   * one feature.
    * 
* * .google.cloud.videointelligence.v1.Feature feature = 5; @@ -378,7 +378,7 @@ public com.google.cloud.videointelligence.v1.Feature getFeature() { * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -394,7 +394,7 @@ public boolean hasSegment() { * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -412,7 +412,7 @@ public com.google.cloud.videointelligence.v1.VideoSegment getSegment() { * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -864,7 +864,7 @@ public Builder mergeFrom( * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -887,7 +887,7 @@ public java.lang.String getInputUri() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -910,7 +910,7 @@ public com.google.protobuf.ByteString getInputUriBytes() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -932,7 +932,7 @@ public Builder setInputUri(java.lang.String value) { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -950,7 +950,7 @@ public Builder clearInputUri() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -1392,7 +1392,7 @@ public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { * *
      * Specifies which feature is being tracked if the request contains more than
-     * one features.
+     * one feature.
      * 
* * .google.cloud.videointelligence.v1.Feature feature = 5; @@ -1408,7 +1408,7 @@ public int getFeatureValue() { * *
      * Specifies which feature is being tracked if the request contains more than
-     * one features.
+     * one feature.
      * 
* * .google.cloud.videointelligence.v1.Feature feature = 5; @@ -1427,7 +1427,7 @@ public Builder setFeatureValue(int value) { * *
      * Specifies which feature is being tracked if the request contains more than
-     * one features.
+     * one feature.
      * 
* * .google.cloud.videointelligence.v1.Feature feature = 5; @@ -1446,7 +1446,7 @@ public com.google.cloud.videointelligence.v1.Feature getFeature() { * *
      * Specifies which feature is being tracked if the request contains more than
-     * one features.
+     * one feature.
      * 
* * .google.cloud.videointelligence.v1.Feature feature = 5; @@ -1468,7 +1468,7 @@ public Builder setFeature(com.google.cloud.videointelligence.v1.Feature value) { * *
      * Specifies which feature is being tracked if the request contains more than
-     * one features.
+     * one feature.
      * 
* * .google.cloud.videointelligence.v1.Feature feature = 5; @@ -1493,7 +1493,7 @@ public Builder clearFeature() { * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -1508,7 +1508,7 @@ public boolean hasSegment() { * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -1529,7 +1529,7 @@ public com.google.cloud.videointelligence.v1.VideoSegment getSegment() { * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -1552,7 +1552,7 @@ public Builder setSegment(com.google.cloud.videointelligence.v1.VideoSegment val * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -1573,7 +1573,7 @@ public Builder setSegment( * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -1600,7 +1600,7 @@ public Builder mergeSegment(com.google.cloud.videointelligence.v1.VideoSegment v * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -1621,7 +1621,7 @@ public Builder clearSegment() { * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -1636,7 +1636,7 @@ public com.google.cloud.videointelligence.v1.VideoSegment.Builder getSegmentBuil * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -1655,7 +1655,7 @@ public com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getSegmentOrB * *
      * Specifies which segment is being tracked if the request contains more than
-     * one segments.
+     * one segment.
      * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationProgressOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationProgressOrBuilder.java index 9740d814d..1b2798f00 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationProgressOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationProgressOrBuilder.java @@ -28,7 +28,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -41,7 +41,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -139,7 +139,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Specifies which feature is being tracked if the request contains more than
-   * one features.
+   * one feature.
    * 
* * .google.cloud.videointelligence.v1.Feature feature = 5; @@ -152,7 +152,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Specifies which feature is being tracked if the request contains more than
-   * one features.
+   * one feature.
    * 
* * .google.cloud.videointelligence.v1.Feature feature = 5; @@ -166,7 +166,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -179,7 +179,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; @@ -192,7 +192,7 @@ public interface VideoAnnotationProgressOrBuilder * *
    * Specifies which segment is being tracked if the request contains more than
-   * one segments.
+   * one segment.
    * 
* * .google.cloud.videointelligence.v1.VideoSegment segment = 6; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResults.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResults.java index 959e5a018..a42c9e768 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResults.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResults.java @@ -45,11 +45,13 @@ private VideoAnnotationResults() { shotPresenceLabelAnnotations_ = java.util.Collections.emptyList(); frameLabelAnnotations_ = java.util.Collections.emptyList(); faceAnnotations_ = java.util.Collections.emptyList(); + faceDetectionAnnotations_ = java.util.Collections.emptyList(); shotAnnotations_ = java.util.Collections.emptyList(); speechTranscriptions_ = java.util.Collections.emptyList(); textAnnotations_ = java.util.Collections.emptyList(); objectAnnotations_ = java.util.Collections.emptyList(); logoRecognitionAnnotations_ = java.util.Collections.emptyList(); + personDetectionAnnotations_ = java.util.Collections.emptyList(); } @java.lang.Override @@ -146,10 +148,10 @@ private VideoAnnotationResults( } case 50: { - if (!((mutable_bitField0_ & 0x00000040) != 0)) { + if (!((mutable_bitField0_ & 0x00000080) != 0)) { shotAnnotations_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000040; + mutable_bitField0_ |= 0x00000080; } shotAnnotations_.add( input.readMessage( @@ -208,11 +210,11 @@ private VideoAnnotationResults( } case 90: { - if (!((mutable_bitField0_ & 0x00000080) != 0)) { + if (!((mutable_bitField0_ & 0x00000100) != 0)) { speechTranscriptions_ = new java.util.ArrayList< com.google.cloud.videointelligence.v1.SpeechTranscription>(); - mutable_bitField0_ |= 0x00000080; + mutable_bitField0_ |= 0x00000100; } speechTranscriptions_.add( 
input.readMessage( @@ -222,10 +224,10 @@ private VideoAnnotationResults( } case 98: { - if (!((mutable_bitField0_ & 0x00000100) != 0)) { + if (!((mutable_bitField0_ & 0x00000200) != 0)) { textAnnotations_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000100; + mutable_bitField0_ |= 0x00000200; } textAnnotations_.add( input.readMessage( @@ -233,13 +235,27 @@ private VideoAnnotationResults( extensionRegistry)); break; } + case 106: + { + if (!((mutable_bitField0_ & 0x00000040) != 0)) { + faceDetectionAnnotations_ = + new java.util.ArrayList< + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation>(); + mutable_bitField0_ |= 0x00000040; + } + faceDetectionAnnotations_.add( + input.readMessage( + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.parser(), + extensionRegistry)); + break; + } case 114: { - if (!((mutable_bitField0_ & 0x00000200) != 0)) { + if (!((mutable_bitField0_ & 0x00000400) != 0)) { objectAnnotations_ = new java.util.ArrayList< com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation>(); - mutable_bitField0_ |= 0x00000200; + mutable_bitField0_ |= 0x00000400; } objectAnnotations_.add( input.readMessage( @@ -249,11 +265,11 @@ private VideoAnnotationResults( } case 154: { - if (!((mutable_bitField0_ & 0x00000400) != 0)) { + if (!((mutable_bitField0_ & 0x00000800) != 0)) { logoRecognitionAnnotations_ = new java.util.ArrayList< com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation>(); - mutable_bitField0_ |= 0x00000400; + mutable_bitField0_ |= 0x00000800; } logoRecognitionAnnotations_.add( input.readMessage( @@ -261,6 +277,20 @@ private VideoAnnotationResults( extensionRegistry)); break; } + case 162: + { + if (!((mutable_bitField0_ & 0x00001000) != 0)) { + personDetectionAnnotations_ = + new java.util.ArrayList< + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation>(); + mutable_bitField0_ |= 0x00001000; + } + personDetectionAnnotations_.add( + input.readMessage( + 
com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.parser(), + extensionRegistry)); + break; + } case 186: { if (!((mutable_bitField0_ & 0x00000002) != 0)) { @@ -315,22 +345,30 @@ private VideoAnnotationResults( if (((mutable_bitField0_ & 0x00000020) != 0)) { faceAnnotations_ = java.util.Collections.unmodifiableList(faceAnnotations_); } - if (((mutable_bitField0_ & 0x00000040) != 0)) { + if (((mutable_bitField0_ & 0x00000080) != 0)) { shotAnnotations_ = java.util.Collections.unmodifiableList(shotAnnotations_); } - if (((mutable_bitField0_ & 0x00000080) != 0)) { + if (((mutable_bitField0_ & 0x00000100) != 0)) { speechTranscriptions_ = java.util.Collections.unmodifiableList(speechTranscriptions_); } - if (((mutable_bitField0_ & 0x00000100) != 0)) { + if (((mutable_bitField0_ & 0x00000200) != 0)) { textAnnotations_ = java.util.Collections.unmodifiableList(textAnnotations_); } - if (((mutable_bitField0_ & 0x00000200) != 0)) { - objectAnnotations_ = java.util.Collections.unmodifiableList(objectAnnotations_); + if (((mutable_bitField0_ & 0x00000040) != 0)) { + faceDetectionAnnotations_ = + java.util.Collections.unmodifiableList(faceDetectionAnnotations_); } if (((mutable_bitField0_ & 0x00000400) != 0)) { + objectAnnotations_ = java.util.Collections.unmodifiableList(objectAnnotations_); + } + if (((mutable_bitField0_ & 0x00000800) != 0)) { logoRecognitionAnnotations_ = java.util.Collections.unmodifiableList(logoRecognitionAnnotations_); } + if (((mutable_bitField0_ & 0x00001000) != 0)) { + personDetectionAnnotations_ = + java.util.Collections.unmodifiableList(personDetectionAnnotations_); + } if (((mutable_bitField0_ & 0x00000002) != 0)) { segmentPresenceLabelAnnotations_ = java.util.Collections.unmodifiableList(segmentPresenceLabelAnnotations_); @@ -366,7 +404,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -390,7 +428,7 @@ public java.lang.String getInputUri() { * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -465,7 +503,7 @@ public com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getSegmentOrB * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -482,7 +520,7 @@ public com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getSegmentOrB * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -499,7 +537,7 @@ public com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getSegmentOrB * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -515,7 +553,7 @@ public int getSegmentLabelAnnotationsCount() { * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -532,7 +570,7 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation getSegmentLabelAnno * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -553,7 +591,7 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation getSegmentLabelAnno * * *
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -574,7 +612,7 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation getSegmentLabelAnno
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -595,7 +633,7 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation getSegmentLabelAnno
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -615,7 +653,7 @@ public int getSegmentPresenceLabelAnnotationsCount() {
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -636,7 +674,7 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation getSegmentPresenceL
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -927,12 +965,15 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation getFrameLabelAnnota
    *
    *
    * 
-   * Face annotations. There is exactly one element for each unique face.
+   * Deprecated. Please use `face_detection_annotations` instead.
    * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ @java.lang.Override + @java.lang.Deprecated public java.util.List getFaceAnnotationsList() { return faceAnnotations_; @@ -941,12 +982,15 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation getFrameLabelAnnota * * *
-   * Face annotations. There is exactly one element for each unique face.
+   * Deprecated. Please use `face_detection_annotations` instead.
    * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ @java.lang.Override + @java.lang.Deprecated public java.util.List getFaceAnnotationsOrBuilderList() { return faceAnnotations_; @@ -955,12 +999,15 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation getFrameLabelAnnota * * *
-   * Face annotations. There is exactly one element for each unique face.
+   * Deprecated. Please use `face_detection_annotations` instead.
    * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ @java.lang.Override + @java.lang.Deprecated public int getFaceAnnotationsCount() { return faceAnnotations_.size(); } @@ -968,12 +1015,15 @@ public int getFaceAnnotationsCount() { * * *
-   * Face annotations. There is exactly one element for each unique face.
+   * Deprecated. Please use `face_detection_annotations` instead.
    * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ @java.lang.Override + @java.lang.Deprecated public com.google.cloud.videointelligence.v1.FaceAnnotation getFaceAnnotations(int index) { return faceAnnotations_.get(index); } @@ -981,17 +1031,104 @@ public com.google.cloud.videointelligence.v1.FaceAnnotation getFaceAnnotations(i * * *
-   * Face annotations. There is exactly one element for each unique face.
+   * Deprecated. Please use `face_detection_annotations` instead.
    * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ @java.lang.Override + @java.lang.Deprecated public com.google.cloud.videointelligence.v1.FaceAnnotationOrBuilder getFaceAnnotationsOrBuilder( int index) { return faceAnnotations_.get(index); } + public static final int FACE_DETECTION_ANNOTATIONS_FIELD_NUMBER = 13; + private java.util.List + faceDetectionAnnotations_; + /** + * + * + *
+   * Face detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * + */ + @java.lang.Override + public java.util.List + getFaceDetectionAnnotationsList() { + return faceDetectionAnnotations_; + } + /** + * + * + *
+   * Face detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.videointelligence.v1.FaceDetectionAnnotationOrBuilder> + getFaceDetectionAnnotationsOrBuilderList() { + return faceDetectionAnnotations_; + } + /** + * + * + *
+   * Face detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * + */ + @java.lang.Override + public int getFaceDetectionAnnotationsCount() { + return faceDetectionAnnotations_.size(); + } + /** + * + * + *
+   * Face detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * + */ + @java.lang.Override + public com.google.cloud.videointelligence.v1.FaceDetectionAnnotation getFaceDetectionAnnotations( + int index) { + return faceDetectionAnnotations_.get(index); + } + /** + * + * + *
+   * Face detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * + */ + @java.lang.Override + public com.google.cloud.videointelligence.v1.FaceDetectionAnnotationOrBuilder + getFaceDetectionAnnotationsOrBuilder(int index) { + return faceDetectionAnnotations_.get(index); + } + public static final int SHOT_ANNOTATIONS_FIELD_NUMBER = 6; private java.util.List shotAnnotations_; /** @@ -1448,6 +1585,90 @@ public int getLogoRecognitionAnnotationsCount() { return logoRecognitionAnnotations_.get(index); } + public static final int PERSON_DETECTION_ANNOTATIONS_FIELD_NUMBER = 20; + private java.util.List + personDetectionAnnotations_; + /** + * + * + *
+   * Person detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; + * + */ + @java.lang.Override + public java.util.List + getPersonDetectionAnnotationsList() { + return personDetectionAnnotations_; + } + /** + * + * + *
+   * Person detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.cloud.videointelligence.v1.PersonDetectionAnnotationOrBuilder> + getPersonDetectionAnnotationsOrBuilderList() { + return personDetectionAnnotations_; + } + /** + * + * + *
+   * Person detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; + * + */ + @java.lang.Override + public int getPersonDetectionAnnotationsCount() { + return personDetectionAnnotations_.size(); + } + /** + * + * + *
+   * Person detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; + * + */ + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotation + getPersonDetectionAnnotations(int index) { + return personDetectionAnnotations_.get(index); + } + /** + * + * + *
+   * Person detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; + * + */ + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotationOrBuilder + getPersonDetectionAnnotationsOrBuilder(int index) { + return personDetectionAnnotations_.get(index); + } + public static final int ERROR_FIELD_NUMBER = 9; private com.google.rpc.Status error_; /** @@ -1544,12 +1765,18 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io for (int i = 0; i < textAnnotations_.size(); i++) { output.writeMessage(12, textAnnotations_.get(i)); } + for (int i = 0; i < faceDetectionAnnotations_.size(); i++) { + output.writeMessage(13, faceDetectionAnnotations_.get(i)); + } for (int i = 0; i < objectAnnotations_.size(); i++) { output.writeMessage(14, objectAnnotations_.get(i)); } for (int i = 0; i < logoRecognitionAnnotations_.size(); i++) { output.writeMessage(19, logoRecognitionAnnotations_.get(i)); } + for (int i = 0; i < personDetectionAnnotations_.size(); i++) { + output.writeMessage(20, personDetectionAnnotations_.get(i)); + } for (int i = 0; i < segmentPresenceLabelAnnotations_.size(); i++) { output.writeMessage(23, segmentPresenceLabelAnnotations_.get(i)); } @@ -1605,6 +1832,11 @@ public int getSerializedSize() { for (int i = 0; i < textAnnotations_.size(); i++) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, textAnnotations_.get(i)); } + for (int i = 0; i < faceDetectionAnnotations_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 13, faceDetectionAnnotations_.get(i)); + } for (int i = 0; i < objectAnnotations_.size(); i++) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(14, objectAnnotations_.get(i)); @@ -1614,6 +1846,11 @@ public int getSerializedSize() { com.google.protobuf.CodedOutputStream.computeMessageSize( 19, logoRecognitionAnnotations_.get(i)); } + for (int i = 0; i < 
personDetectionAnnotations_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 20, personDetectionAnnotations_.get(i)); + } for (int i = 0; i < segmentPresenceLabelAnnotations_.size(); i++) { size += com.google.protobuf.CodedOutputStream.computeMessageSize( @@ -1654,6 +1891,8 @@ public boolean equals(final java.lang.Object obj) { return false; if (!getFrameLabelAnnotationsList().equals(other.getFrameLabelAnnotationsList())) return false; if (!getFaceAnnotationsList().equals(other.getFaceAnnotationsList())) return false; + if (!getFaceDetectionAnnotationsList().equals(other.getFaceDetectionAnnotationsList())) + return false; if (!getShotAnnotationsList().equals(other.getShotAnnotationsList())) return false; if (hasExplicitAnnotation() != other.hasExplicitAnnotation()) return false; if (hasExplicitAnnotation()) { @@ -1664,6 +1903,8 @@ public boolean equals(final java.lang.Object obj) { if (!getObjectAnnotationsList().equals(other.getObjectAnnotationsList())) return false; if (!getLogoRecognitionAnnotationsList().equals(other.getLogoRecognitionAnnotationsList())) return false; + if (!getPersonDetectionAnnotationsList().equals(other.getPersonDetectionAnnotationsList())) + return false; if (hasError() != other.hasError()) return false; if (hasError()) { if (!getError().equals(other.getError())) return false; @@ -1709,6 +1950,10 @@ public int hashCode() { hash = (37 * hash) + FACE_ANNOTATIONS_FIELD_NUMBER; hash = (53 * hash) + getFaceAnnotationsList().hashCode(); } + if (getFaceDetectionAnnotationsCount() > 0) { + hash = (37 * hash) + FACE_DETECTION_ANNOTATIONS_FIELD_NUMBER; + hash = (53 * hash) + getFaceDetectionAnnotationsList().hashCode(); + } if (getShotAnnotationsCount() > 0) { hash = (37 * hash) + SHOT_ANNOTATIONS_FIELD_NUMBER; hash = (53 * hash) + getShotAnnotationsList().hashCode(); @@ -1733,6 +1978,10 @@ public int hashCode() { hash = (37 * hash) + LOGO_RECOGNITION_ANNOTATIONS_FIELD_NUMBER; hash = (53 * hash) + 
getLogoRecognitionAnnotationsList().hashCode(); } + if (getPersonDetectionAnnotationsCount() > 0) { + hash = (37 * hash) + PERSON_DETECTION_ANNOTATIONS_FIELD_NUMBER; + hash = (53 * hash) + getPersonDetectionAnnotationsList().hashCode(); + } if (hasError()) { hash = (37 * hash) + ERROR_FIELD_NUMBER; hash = (53 * hash) + getError().hashCode(); @@ -1884,11 +2133,13 @@ private void maybeForceBuilderInitialization() { getShotPresenceLabelAnnotationsFieldBuilder(); getFrameLabelAnnotationsFieldBuilder(); getFaceAnnotationsFieldBuilder(); + getFaceDetectionAnnotationsFieldBuilder(); getShotAnnotationsFieldBuilder(); getSpeechTranscriptionsFieldBuilder(); getTextAnnotationsFieldBuilder(); getObjectAnnotationsFieldBuilder(); getLogoRecognitionAnnotationsFieldBuilder(); + getPersonDetectionAnnotationsFieldBuilder(); } } @@ -1939,9 +2190,15 @@ public Builder clear() { } else { faceAnnotationsBuilder_.clear(); } + if (faceDetectionAnnotationsBuilder_ == null) { + faceDetectionAnnotations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + faceDetectionAnnotationsBuilder_.clear(); + } if (shotAnnotationsBuilder_ == null) { shotAnnotations_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000080); } else { shotAnnotationsBuilder_.clear(); } @@ -1953,28 +2210,34 @@ public Builder clear() { } if (speechTranscriptionsBuilder_ == null) { speechTranscriptions_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000080); + bitField0_ = (bitField0_ & ~0x00000100); } else { speechTranscriptionsBuilder_.clear(); } if (textAnnotationsBuilder_ == null) { textAnnotations_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000200); } else { textAnnotationsBuilder_.clear(); } if (objectAnnotationsBuilder_ == null) { objectAnnotations_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & 
~0x00000200); + bitField0_ = (bitField0_ & ~0x00000400); } else { objectAnnotationsBuilder_.clear(); } if (logoRecognitionAnnotationsBuilder_ == null) { logoRecognitionAnnotations_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000400); + bitField0_ = (bitField0_ & ~0x00000800); } else { logoRecognitionAnnotationsBuilder_.clear(); } + if (personDetectionAnnotationsBuilder_ == null) { + personDetectionAnnotations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00001000); + } else { + personDetectionAnnotationsBuilder_.clear(); + } if (errorBuilder_ == null) { error_ = null; } else { @@ -2073,11 +2336,21 @@ public com.google.cloud.videointelligence.v1.VideoAnnotationResults buildPartial } else { result.faceAnnotations_ = faceAnnotationsBuilder_.build(); } - if (shotAnnotationsBuilder_ == null) { + if (faceDetectionAnnotationsBuilder_ == null) { if (((bitField0_ & 0x00000040) != 0)) { - shotAnnotations_ = java.util.Collections.unmodifiableList(shotAnnotations_); + faceDetectionAnnotations_ = + java.util.Collections.unmodifiableList(faceDetectionAnnotations_); bitField0_ = (bitField0_ & ~0x00000040); } + result.faceDetectionAnnotations_ = faceDetectionAnnotations_; + } else { + result.faceDetectionAnnotations_ = faceDetectionAnnotationsBuilder_.build(); + } + if (shotAnnotationsBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0)) { + shotAnnotations_ = java.util.Collections.unmodifiableList(shotAnnotations_); + bitField0_ = (bitField0_ & ~0x00000080); + } result.shotAnnotations_ = shotAnnotations_; } else { result.shotAnnotations_ = shotAnnotationsBuilder_.build(); @@ -2088,42 +2361,52 @@ public com.google.cloud.videointelligence.v1.VideoAnnotationResults buildPartial result.explicitAnnotation_ = explicitAnnotationBuilder_.build(); } if (speechTranscriptionsBuilder_ == null) { - if (((bitField0_ & 0x00000080) != 0)) { + if (((bitField0_ & 0x00000100) != 0)) { speechTranscriptions_ = 
java.util.Collections.unmodifiableList(speechTranscriptions_); - bitField0_ = (bitField0_ & ~0x00000080); + bitField0_ = (bitField0_ & ~0x00000100); } result.speechTranscriptions_ = speechTranscriptions_; } else { result.speechTranscriptions_ = speechTranscriptionsBuilder_.build(); } if (textAnnotationsBuilder_ == null) { - if (((bitField0_ & 0x00000100) != 0)) { + if (((bitField0_ & 0x00000200) != 0)) { textAnnotations_ = java.util.Collections.unmodifiableList(textAnnotations_); - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000200); } result.textAnnotations_ = textAnnotations_; } else { result.textAnnotations_ = textAnnotationsBuilder_.build(); } if (objectAnnotationsBuilder_ == null) { - if (((bitField0_ & 0x00000200) != 0)) { + if (((bitField0_ & 0x00000400) != 0)) { objectAnnotations_ = java.util.Collections.unmodifiableList(objectAnnotations_); - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000400); } result.objectAnnotations_ = objectAnnotations_; } else { result.objectAnnotations_ = objectAnnotationsBuilder_.build(); } if (logoRecognitionAnnotationsBuilder_ == null) { - if (((bitField0_ & 0x00000400) != 0)) { + if (((bitField0_ & 0x00000800) != 0)) { logoRecognitionAnnotations_ = java.util.Collections.unmodifiableList(logoRecognitionAnnotations_); - bitField0_ = (bitField0_ & ~0x00000400); + bitField0_ = (bitField0_ & ~0x00000800); } result.logoRecognitionAnnotations_ = logoRecognitionAnnotations_; } else { result.logoRecognitionAnnotations_ = logoRecognitionAnnotationsBuilder_.build(); } + if (personDetectionAnnotationsBuilder_ == null) { + if (((bitField0_ & 0x00001000) != 0)) { + personDetectionAnnotations_ = + java.util.Collections.unmodifiableList(personDetectionAnnotations_); + bitField0_ = (bitField0_ & ~0x00001000); + } + result.personDetectionAnnotations_ = personDetectionAnnotations_; + } else { + result.personDetectionAnnotations_ = personDetectionAnnotationsBuilder_.build(); + } if 
(errorBuilder_ == null) { result.error_ = error_; } else { @@ -2351,11 +2634,38 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe } } } + if (faceDetectionAnnotationsBuilder_ == null) { + if (!other.faceDetectionAnnotations_.isEmpty()) { + if (faceDetectionAnnotations_.isEmpty()) { + faceDetectionAnnotations_ = other.faceDetectionAnnotations_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureFaceDetectionAnnotationsIsMutable(); + faceDetectionAnnotations_.addAll(other.faceDetectionAnnotations_); + } + onChanged(); + } + } else { + if (!other.faceDetectionAnnotations_.isEmpty()) { + if (faceDetectionAnnotationsBuilder_.isEmpty()) { + faceDetectionAnnotationsBuilder_.dispose(); + faceDetectionAnnotationsBuilder_ = null; + faceDetectionAnnotations_ = other.faceDetectionAnnotations_; + bitField0_ = (bitField0_ & ~0x00000040); + faceDetectionAnnotationsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getFaceDetectionAnnotationsFieldBuilder() + : null; + } else { + faceDetectionAnnotationsBuilder_.addAllMessages(other.faceDetectionAnnotations_); + } + } + } if (shotAnnotationsBuilder_ == null) { if (!other.shotAnnotations_.isEmpty()) { if (shotAnnotations_.isEmpty()) { shotAnnotations_ = other.shotAnnotations_; - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000080); } else { ensureShotAnnotationsIsMutable(); shotAnnotations_.addAll(other.shotAnnotations_); @@ -2368,7 +2678,7 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe shotAnnotationsBuilder_.dispose(); shotAnnotationsBuilder_ = null; shotAnnotations_ = other.shotAnnotations_; - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000080); shotAnnotationsBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getShotAnnotationsFieldBuilder() @@ -2385,7 +2695,7 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe if (!other.speechTranscriptions_.isEmpty()) { if (speechTranscriptions_.isEmpty()) { speechTranscriptions_ = other.speechTranscriptions_; - bitField0_ = (bitField0_ & ~0x00000080); + bitField0_ = (bitField0_ & ~0x00000100); } else { ensureSpeechTranscriptionsIsMutable(); speechTranscriptions_.addAll(other.speechTranscriptions_); @@ -2398,7 +2708,7 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe speechTranscriptionsBuilder_.dispose(); speechTranscriptionsBuilder_ = null; speechTranscriptions_ = other.speechTranscriptions_; - bitField0_ = (bitField0_ & ~0x00000080); + bitField0_ = (bitField0_ & ~0x00000100); speechTranscriptionsBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getSpeechTranscriptionsFieldBuilder() @@ -2412,7 +2722,7 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe if (!other.textAnnotations_.isEmpty()) { if (textAnnotations_.isEmpty()) { textAnnotations_ = other.textAnnotations_; - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000200); } else { ensureTextAnnotationsIsMutable(); textAnnotations_.addAll(other.textAnnotations_); @@ -2425,7 +2735,7 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe textAnnotationsBuilder_.dispose(); textAnnotationsBuilder_ = null; textAnnotations_ = other.textAnnotations_; - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000200); textAnnotationsBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getTextAnnotationsFieldBuilder() @@ -2439,7 +2749,7 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe if (!other.objectAnnotations_.isEmpty()) { if (objectAnnotations_.isEmpty()) { objectAnnotations_ = other.objectAnnotations_; - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000400); } else { ensureObjectAnnotationsIsMutable(); objectAnnotations_.addAll(other.objectAnnotations_); @@ -2452,7 +2762,7 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe objectAnnotationsBuilder_.dispose(); objectAnnotationsBuilder_ = null; objectAnnotations_ = other.objectAnnotations_; - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000400); objectAnnotationsBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getObjectAnnotationsFieldBuilder() @@ -2466,7 +2776,7 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe if (!other.logoRecognitionAnnotations_.isEmpty()) { if (logoRecognitionAnnotations_.isEmpty()) { logoRecognitionAnnotations_ = other.logoRecognitionAnnotations_; - bitField0_ = (bitField0_ & ~0x00000400); + bitField0_ = (bitField0_ & ~0x00000800); } else { ensureLogoRecognitionAnnotationsIsMutable(); logoRecognitionAnnotations_.addAll(other.logoRecognitionAnnotations_); @@ -2479,7 +2789,7 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe logoRecognitionAnnotationsBuilder_.dispose(); logoRecognitionAnnotationsBuilder_ = null; logoRecognitionAnnotations_ = other.logoRecognitionAnnotations_; - bitField0_ = (bitField0_ & ~0x00000400); + bitField0_ = (bitField0_ & ~0x00000800); logoRecognitionAnnotationsBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getLogoRecognitionAnnotationsFieldBuilder() @@ -2489,8 +2799,35 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe } } } - if (other.hasError()) { - mergeError(other.getError()); + if (personDetectionAnnotationsBuilder_ == null) { + if (!other.personDetectionAnnotations_.isEmpty()) { + if (personDetectionAnnotations_.isEmpty()) { + personDetectionAnnotations_ = other.personDetectionAnnotations_; + bitField0_ = (bitField0_ & ~0x00001000); + } else { + ensurePersonDetectionAnnotationsIsMutable(); + personDetectionAnnotations_.addAll(other.personDetectionAnnotations_); + } + onChanged(); + } + } else { + if (!other.personDetectionAnnotations_.isEmpty()) { + if (personDetectionAnnotationsBuilder_.isEmpty()) { + personDetectionAnnotationsBuilder_.dispose(); + personDetectionAnnotationsBuilder_ = null; + personDetectionAnnotations_ = other.personDetectionAnnotations_; + bitField0_ = (bitField0_ & ~0x00001000); + personDetectionAnnotationsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getPersonDetectionAnnotationsFieldBuilder() + : null; + } else { + personDetectionAnnotationsBuilder_.addAllMessages(other.personDetectionAnnotations_); + } + } + } + if (other.hasError()) { + mergeError(other.getError()); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -2530,7 +2867,7 @@ public Builder mergeFrom( * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -2553,7 +2890,7 @@ public java.lang.String getInputUri() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -2576,7 +2913,7 @@ public com.google.protobuf.ByteString getInputUriBytes() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -2598,7 +2935,7 @@ public Builder setInputUri(java.lang.String value) { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -2616,7 +2953,7 @@ public Builder clearInputUri() { * *
      * Video file location in
-     * [Google Cloud Storage](https://cloud.google.com/storage/).
+     * [Cloud Storage](https://cloud.google.com/storage/).
      * 
* * string input_uri = 1; @@ -2843,7 +3180,7 @@ private void ensureSegmentLabelAnnotationsIsMutable() { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -2863,7 +3200,7 @@ private void ensureSegmentLabelAnnotationsIsMutable() { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -2882,7 +3219,7 @@ public int getSegmentLabelAnnotationsCount() { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -2902,7 +3239,7 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation getSegmentLabelAnno * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -2928,7 +3265,7 @@ public Builder setSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -2951,7 +3288,7 @@ public Builder setSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -2977,7 +3314,7 @@ public Builder addSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3003,7 +3340,7 @@ public Builder addSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3026,7 +3363,7 @@ public Builder addSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3049,7 +3386,7 @@ public Builder addSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3073,7 +3410,7 @@ public Builder addAllSegmentLabelAnnotations( * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3095,7 +3432,7 @@ public Builder clearSegmentLabelAnnotations() { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3117,7 +3454,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3133,7 +3470,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3153,7 +3490,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3173,7 +3510,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3190,7 +3527,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3208,7 +3545,7 @@ public Builder removeSegmentLabelAnnotations(int index) { * * *
-     * Topical label annotations on video level or user specified segment level.
+     * Topical label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label.
      * 
* @@ -3263,7 +3600,7 @@ private void ensureSegmentPresenceLabelAnnotationsIsMutable() { * * *
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3287,7 +3624,7 @@ private void ensureSegmentPresenceLabelAnnotationsIsMutable() {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3310,7 +3647,7 @@ public int getSegmentPresenceLabelAnnotationsCount() {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3334,7 +3671,7 @@ public com.google.cloud.videointelligence.v1.LabelAnnotation getSegmentPresenceL
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3364,7 +3701,7 @@ public Builder setSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3391,7 +3728,7 @@ public Builder setSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3421,7 +3758,7 @@ public Builder addSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3451,7 +3788,7 @@ public Builder addSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3478,7 +3815,7 @@ public Builder addSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3505,7 +3842,7 @@ public Builder addSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3534,7 +3871,7 @@ public Builder addAllSegmentPresenceLabelAnnotations(
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3560,7 +3897,7 @@ public Builder clearSegmentPresenceLabelAnnotations() {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3586,7 +3923,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3606,7 +3943,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3630,7 +3967,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3654,7 +3991,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3675,7 +4012,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -3697,7 +4034,7 @@ public Builder removeSegmentPresenceLabelAnnotations(int index) {
      *
      *
      * 
-     * Presence label annotations on video level or user specified segment level.
+     * Presence label annotations on video level or user-specified segment level.
      * There is exactly one element for each unique label. Compared to the
      * existing topical `segment_label_annotations`, this field presents more
      * fine-grained, segment-level labels detected in video content and is made
@@ -5053,11 +5390,14 @@ private void ensureFaceAnnotationsIsMutable() {
      *
      *
      * 
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public java.util.List getFaceAnnotationsList() { if (faceAnnotationsBuilder_ == null) { @@ -5070,11 +5410,14 @@ private void ensureFaceAnnotationsIsMutable() { * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public int getFaceAnnotationsCount() { if (faceAnnotationsBuilder_ == null) { return faceAnnotations_.size(); @@ -5086,11 +5429,14 @@ public int getFaceAnnotationsCount() { * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public com.google.cloud.videointelligence.v1.FaceAnnotation getFaceAnnotations(int index) { if (faceAnnotationsBuilder_ == null) { return faceAnnotations_.get(index); @@ -5102,11 +5448,14 @@ public com.google.cloud.videointelligence.v1.FaceAnnotation getFaceAnnotations(i * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public Builder setFaceAnnotations( int index, com.google.cloud.videointelligence.v1.FaceAnnotation value) { if (faceAnnotationsBuilder_ == null) { @@ -5125,11 +5474,14 @@ public Builder setFaceAnnotations( * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public Builder setFaceAnnotations( int index, com.google.cloud.videointelligence.v1.FaceAnnotation.Builder builderForValue) { if (faceAnnotationsBuilder_ == null) { @@ -5145,11 +5497,14 @@ public Builder setFaceAnnotations( * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public Builder addFaceAnnotations(com.google.cloud.videointelligence.v1.FaceAnnotation value) { if (faceAnnotationsBuilder_ == null) { if (value == null) { @@ -5167,11 +5522,14 @@ public Builder addFaceAnnotations(com.google.cloud.videointelligence.v1.FaceAnno * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public Builder addFaceAnnotations( int index, com.google.cloud.videointelligence.v1.FaceAnnotation value) { if (faceAnnotationsBuilder_ == null) { @@ -5190,11 +5548,14 @@ public Builder addFaceAnnotations( * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public Builder addFaceAnnotations( com.google.cloud.videointelligence.v1.FaceAnnotation.Builder builderForValue) { if (faceAnnotationsBuilder_ == null) { @@ -5210,11 +5571,14 @@ public Builder addFaceAnnotations( * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public Builder addFaceAnnotations( int index, com.google.cloud.videointelligence.v1.FaceAnnotation.Builder builderForValue) { if (faceAnnotationsBuilder_ == null) { @@ -5230,11 +5594,14 @@ public Builder addFaceAnnotations( * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public Builder addAllFaceAnnotations( java.lang.Iterable values) { if (faceAnnotationsBuilder_ == null) { @@ -5250,11 +5617,14 @@ public Builder addAllFaceAnnotations( * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public Builder clearFaceAnnotations() { if (faceAnnotationsBuilder_ == null) { faceAnnotations_ = java.util.Collections.emptyList(); @@ -5269,11 +5639,14 @@ public Builder clearFaceAnnotations() { * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public Builder removeFaceAnnotations(int index) { if (faceAnnotationsBuilder_ == null) { ensureFaceAnnotationsIsMutable(); @@ -5288,11 +5661,14 @@ public Builder removeFaceAnnotations(int index) { * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public com.google.cloud.videointelligence.v1.FaceAnnotation.Builder getFaceAnnotationsBuilder( int index) { return getFaceAnnotationsFieldBuilder().getBuilder(index); @@ -5301,11 +5677,14 @@ public com.google.cloud.videointelligence.v1.FaceAnnotation.Builder getFaceAnnot * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public com.google.cloud.videointelligence.v1.FaceAnnotationOrBuilder getFaceAnnotationsOrBuilder(int index) { if (faceAnnotationsBuilder_ == null) { @@ -5318,11 +5697,14 @@ public com.google.cloud.videointelligence.v1.FaceAnnotation.Builder getFaceAnnot * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public java.util.List getFaceAnnotationsOrBuilderList() { if (faceAnnotationsBuilder_ != null) { @@ -5335,11 +5717,14 @@ public com.google.cloud.videointelligence.v1.FaceAnnotation.Builder getFaceAnnot * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public com.google.cloud.videointelligence.v1.FaceAnnotation.Builder addFaceAnnotationsBuilder() { return getFaceAnnotationsFieldBuilder() @@ -5349,11 +5734,14 @@ public com.google.cloud.videointelligence.v1.FaceAnnotation.Builder getFaceAnnot * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public com.google.cloud.videointelligence.v1.FaceAnnotation.Builder addFaceAnnotationsBuilder( int index) { return getFaceAnnotationsFieldBuilder() @@ -5364,11 +5752,14 @@ public com.google.cloud.videointelligence.v1.FaceAnnotation.Builder addFaceAnnot * * *
-     * Face annotations. There is exactly one element for each unique face.
+     * Deprecated. Please use `face_detection_annotations` instead.
      * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated public java.util.List getFaceAnnotationsBuilderList() { return getFaceAnnotationsFieldBuilder().getBuilderList(); @@ -5394,93 +5785,102 @@ public com.google.cloud.videointelligence.v1.FaceAnnotation.Builder addFaceAnnot return faceAnnotationsBuilder_; } - private java.util.List shotAnnotations_ = - java.util.Collections.emptyList(); + private java.util.List + faceDetectionAnnotations_ = java.util.Collections.emptyList(); - private void ensureShotAnnotationsIsMutable() { + private void ensureFaceDetectionAnnotationsIsMutable() { if (!((bitField0_ & 0x00000040) != 0)) { - shotAnnotations_ = - new java.util.ArrayList( - shotAnnotations_); + faceDetectionAnnotations_ = + new java.util.ArrayList( + faceDetectionAnnotations_); bitField0_ |= 0x00000040; } } private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.VideoSegment, - com.google.cloud.videointelligence.v1.VideoSegment.Builder, - com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder> - shotAnnotationsBuilder_; + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotationOrBuilder> + faceDetectionAnnotationsBuilder_; /** * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public java.util.List - getShotAnnotationsList() { - if (shotAnnotationsBuilder_ == null) { - return java.util.Collections.unmodifiableList(shotAnnotations_); + public java.util.List + getFaceDetectionAnnotationsList() { + if (faceDetectionAnnotationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(faceDetectionAnnotations_); } else { - return shotAnnotationsBuilder_.getMessageList(); + return faceDetectionAnnotationsBuilder_.getMessageList(); } } /** * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public int getShotAnnotationsCount() { - if (shotAnnotationsBuilder_ == null) { - return shotAnnotations_.size(); + public int getFaceDetectionAnnotationsCount() { + if (faceDetectionAnnotationsBuilder_ == null) { + return faceDetectionAnnotations_.size(); } else { - return shotAnnotationsBuilder_.getCount(); + return faceDetectionAnnotationsBuilder_.getCount(); } } /** * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public com.google.cloud.videointelligence.v1.VideoSegment getShotAnnotations(int index) { - if (shotAnnotationsBuilder_ == null) { - return shotAnnotations_.get(index); + public com.google.cloud.videointelligence.v1.FaceDetectionAnnotation + getFaceDetectionAnnotations(int index) { + if (faceDetectionAnnotationsBuilder_ == null) { + return faceDetectionAnnotations_.get(index); } else { - return shotAnnotationsBuilder_.getMessage(index); + return faceDetectionAnnotationsBuilder_.getMessage(index); } } /** * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public Builder setShotAnnotations( - int index, com.google.cloud.videointelligence.v1.VideoSegment value) { - if (shotAnnotationsBuilder_ == null) { + public Builder setFaceDetectionAnnotations( + int index, com.google.cloud.videointelligence.v1.FaceDetectionAnnotation value) { + if (faceDetectionAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureShotAnnotationsIsMutable(); - shotAnnotations_.set(index, value); + ensureFaceDetectionAnnotationsIsMutable(); + faceDetectionAnnotations_.set(index, value); onChanged(); } else { - shotAnnotationsBuilder_.setMessage(index, value); + faceDetectionAnnotationsBuilder_.setMessage(index, value); } return this; } @@ -5488,19 +5888,22 @@ public Builder setShotAnnotations( * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public Builder setShotAnnotations( - int index, com.google.cloud.videointelligence.v1.VideoSegment.Builder builderForValue) { - if (shotAnnotationsBuilder_ == null) { - ensureShotAnnotationsIsMutable(); - shotAnnotations_.set(index, builderForValue.build()); + public Builder setFaceDetectionAnnotations( + int index, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder builderForValue) { + if (faceDetectionAnnotationsBuilder_ == null) { + ensureFaceDetectionAnnotationsIsMutable(); + faceDetectionAnnotations_.set(index, builderForValue.build()); onChanged(); } else { - shotAnnotationsBuilder_.setMessage(index, builderForValue.build()); + faceDetectionAnnotationsBuilder_.setMessage(index, builderForValue.build()); } return this; } @@ -5508,21 +5911,24 @@ public Builder setShotAnnotations( * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public Builder addShotAnnotations(com.google.cloud.videointelligence.v1.VideoSegment value) { - if (shotAnnotationsBuilder_ == null) { + public Builder addFaceDetectionAnnotations( + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation value) { + if (faceDetectionAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureShotAnnotationsIsMutable(); - shotAnnotations_.add(value); + ensureFaceDetectionAnnotationsIsMutable(); + faceDetectionAnnotations_.add(value); onChanged(); } else { - shotAnnotationsBuilder_.addMessage(value); + faceDetectionAnnotationsBuilder_.addMessage(value); } return this; } @@ -5530,22 +5936,24 @@ public Builder addShotAnnotations(com.google.cloud.videointelligence.v1.VideoSeg * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public Builder addShotAnnotations( - int index, com.google.cloud.videointelligence.v1.VideoSegment value) { - if (shotAnnotationsBuilder_ == null) { + public Builder addFaceDetectionAnnotations( + int index, com.google.cloud.videointelligence.v1.FaceDetectionAnnotation value) { + if (faceDetectionAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureShotAnnotationsIsMutable(); - shotAnnotations_.add(index, value); + ensureFaceDetectionAnnotationsIsMutable(); + faceDetectionAnnotations_.add(index, value); onChanged(); } else { - shotAnnotationsBuilder_.addMessage(index, value); + faceDetectionAnnotationsBuilder_.addMessage(index, value); } return this; } @@ -5553,19 +5961,21 @@ public Builder addShotAnnotations( * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public Builder addShotAnnotations( - com.google.cloud.videointelligence.v1.VideoSegment.Builder builderForValue) { - if (shotAnnotationsBuilder_ == null) { - ensureShotAnnotationsIsMutable(); - shotAnnotations_.add(builderForValue.build()); + public Builder addFaceDetectionAnnotations( + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder builderForValue) { + if (faceDetectionAnnotationsBuilder_ == null) { + ensureFaceDetectionAnnotationsIsMutable(); + faceDetectionAnnotations_.add(builderForValue.build()); onChanged(); } else { - shotAnnotationsBuilder_.addMessage(builderForValue.build()); + faceDetectionAnnotationsBuilder_.addMessage(builderForValue.build()); } return this; } @@ -5573,19 +5983,22 @@ public Builder addShotAnnotations( * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public Builder addShotAnnotations( - int index, com.google.cloud.videointelligence.v1.VideoSegment.Builder builderForValue) { - if (shotAnnotationsBuilder_ == null) { - ensureShotAnnotationsIsMutable(); - shotAnnotations_.add(index, builderForValue.build()); + public Builder addFaceDetectionAnnotations( + int index, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder builderForValue) { + if (faceDetectionAnnotationsBuilder_ == null) { + ensureFaceDetectionAnnotationsIsMutable(); + faceDetectionAnnotations_.add(index, builderForValue.build()); onChanged(); } else { - shotAnnotationsBuilder_.addMessage(index, builderForValue.build()); + faceDetectionAnnotationsBuilder_.addMessage(index, builderForValue.build()); } return this; } @@ -5593,19 +6006,22 @@ public Builder addShotAnnotations( * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public Builder addAllShotAnnotations( - java.lang.Iterable values) { - if (shotAnnotationsBuilder_ == null) { - ensureShotAnnotationsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, shotAnnotations_); + public Builder addAllFaceDetectionAnnotations( + java.lang.Iterable + values) { + if (faceDetectionAnnotationsBuilder_ == null) { + ensureFaceDetectionAnnotationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, faceDetectionAnnotations_); onChanged(); } else { - shotAnnotationsBuilder_.addAllMessages(values); + faceDetectionAnnotationsBuilder_.addAllMessages(values); } return this; } @@ -5613,18 +6029,20 @@ public Builder addAllShotAnnotations( * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public Builder clearShotAnnotations() { - if (shotAnnotationsBuilder_ == null) { - shotAnnotations_ = java.util.Collections.emptyList(); + public Builder clearFaceDetectionAnnotations() { + if (faceDetectionAnnotationsBuilder_ == null) { + faceDetectionAnnotations_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); onChanged(); } else { - shotAnnotationsBuilder_.clear(); + faceDetectionAnnotationsBuilder_.clear(); } return this; } @@ -5632,18 +6050,20 @@ public Builder clearShotAnnotations() { * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public Builder removeShotAnnotations(int index) { - if (shotAnnotationsBuilder_ == null) { - ensureShotAnnotationsIsMutable(); - shotAnnotations_.remove(index); + public Builder removeFaceDetectionAnnotations(int index) { + if (faceDetectionAnnotationsBuilder_ == null) { + ensureFaceDetectionAnnotationsIsMutable(); + faceDetectionAnnotations_.remove(index); onChanged(); } else { - shotAnnotationsBuilder_.remove(index); + faceDetectionAnnotationsBuilder_.remove(index); } return this; } @@ -5651,407 +6071,1195 @@ public Builder removeShotAnnotations(int index) { * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public com.google.cloud.videointelligence.v1.VideoSegment.Builder getShotAnnotationsBuilder( - int index) { - return getShotAnnotationsFieldBuilder().getBuilder(index); + public com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder + getFaceDetectionAnnotationsBuilder(int index) { + return getFaceDetectionAnnotationsFieldBuilder().getBuilder(index); } /** * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getShotAnnotationsOrBuilder( - int index) { - if (shotAnnotationsBuilder_ == null) { - return shotAnnotations_.get(index); + public com.google.cloud.videointelligence.v1.FaceDetectionAnnotationOrBuilder + getFaceDetectionAnnotationsOrBuilder(int index) { + if (faceDetectionAnnotationsBuilder_ == null) { + return faceDetectionAnnotations_.get(index); } else { - return shotAnnotationsBuilder_.getMessageOrBuilder(index); + return faceDetectionAnnotationsBuilder_.getMessageOrBuilder(index); } } /** * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public java.util.List - getShotAnnotationsOrBuilderList() { - if (shotAnnotationsBuilder_ != null) { - return shotAnnotationsBuilder_.getMessageOrBuilderList(); + public java.util.List< + ? extends com.google.cloud.videointelligence.v1.FaceDetectionAnnotationOrBuilder> + getFaceDetectionAnnotationsOrBuilderList() { + if (faceDetectionAnnotationsBuilder_ != null) { + return faceDetectionAnnotationsBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(shotAnnotations_); + return java.util.Collections.unmodifiableList(faceDetectionAnnotations_); } } /** * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public com.google.cloud.videointelligence.v1.VideoSegment.Builder addShotAnnotationsBuilder() { - return getShotAnnotationsFieldBuilder() - .addBuilder(com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance()); + public com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder + addFaceDetectionAnnotationsBuilder() { + return getFaceDetectionAnnotationsFieldBuilder() + .addBuilder( + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.getDefaultInstance()); } /** * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public com.google.cloud.videointelligence.v1.VideoSegment.Builder addShotAnnotationsBuilder( - int index) { - return getShotAnnotationsFieldBuilder() + public com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder + addFaceDetectionAnnotationsBuilder(int index) { + return getFaceDetectionAnnotationsFieldBuilder() .addBuilder( - index, com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance()); + index, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.getDefaultInstance()); } /** * * *
-     * Shot annotations. Each shot is represented as a video segment.
+     * Face detection annotations.
      * 
* - * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * */ - public java.util.List - getShotAnnotationsBuilderList() { - return getShotAnnotationsFieldBuilder().getBuilderList(); + public java.util.List + getFaceDetectionAnnotationsBuilderList() { + return getFaceDetectionAnnotationsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.VideoSegment, - com.google.cloud.videointelligence.v1.VideoSegment.Builder, - com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder> - getShotAnnotationsFieldBuilder() { - if (shotAnnotationsBuilder_ == null) { - shotAnnotationsBuilder_ = + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotationOrBuilder> + getFaceDetectionAnnotationsFieldBuilder() { + if (faceDetectionAnnotationsBuilder_ == null) { + faceDetectionAnnotationsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.VideoSegment, - com.google.cloud.videointelligence.v1.VideoSegment.Builder, - com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder>( - shotAnnotations_, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation.Builder, + com.google.cloud.videointelligence.v1.FaceDetectionAnnotationOrBuilder>( + faceDetectionAnnotations_, ((bitField0_ & 0x00000040) != 0), getParentForChildren(), isClean()); - shotAnnotations_ = null; + faceDetectionAnnotations_ = null; } - return shotAnnotationsBuilder_; + return faceDetectionAnnotationsBuilder_; } - private com.google.cloud.videointelligence.v1.ExplicitContentAnnotation explicitAnnotation_; - private com.google.protobuf.SingleFieldBuilderV3< - 
com.google.cloud.videointelligence.v1.ExplicitContentAnnotation, - com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.Builder, - com.google.cloud.videointelligence.v1.ExplicitContentAnnotationOrBuilder> - explicitAnnotationBuilder_; - /** - * - * - *
-     * Explicit content annotation.
-     * 
- * - * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; - * - * - * @return Whether the explicitAnnotation field is set. - */ - public boolean hasExplicitAnnotation() { - return explicitAnnotationBuilder_ != null || explicitAnnotation_ != null; - } + private java.util.List shotAnnotations_ = + java.util.Collections.emptyList(); + + private void ensureShotAnnotationsIsMutable() { + if (!((bitField0_ & 0x00000080) != 0)) { + shotAnnotations_ = + new java.util.ArrayList( + shotAnnotations_); + bitField0_ |= 0x00000080; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1.VideoSegment, + com.google.cloud.videointelligence.v1.VideoSegment.Builder, + com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder> + shotAnnotationsBuilder_; + + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public java.util.List + getShotAnnotationsList() { + if (shotAnnotationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(shotAnnotations_); + } else { + return shotAnnotationsBuilder_.getMessageList(); + } + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public int getShotAnnotationsCount() { + if (shotAnnotationsBuilder_ == null) { + return shotAnnotations_.size(); + } else { + return shotAnnotationsBuilder_.getCount(); + } + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public com.google.cloud.videointelligence.v1.VideoSegment getShotAnnotations(int index) { + if (shotAnnotationsBuilder_ == null) { + return shotAnnotations_.get(index); + } else { + return shotAnnotationsBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public Builder setShotAnnotations( + int index, com.google.cloud.videointelligence.v1.VideoSegment value) { + if (shotAnnotationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureShotAnnotationsIsMutable(); + shotAnnotations_.set(index, value); + onChanged(); + } else { + shotAnnotationsBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public Builder setShotAnnotations( + int index, com.google.cloud.videointelligence.v1.VideoSegment.Builder builderForValue) { + if (shotAnnotationsBuilder_ == null) { + ensureShotAnnotationsIsMutable(); + shotAnnotations_.set(index, builderForValue.build()); + onChanged(); + } else { + shotAnnotationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public Builder addShotAnnotations(com.google.cloud.videointelligence.v1.VideoSegment value) { + if (shotAnnotationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureShotAnnotationsIsMutable(); + shotAnnotations_.add(value); + onChanged(); + } else { + shotAnnotationsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public Builder addShotAnnotations( + int index, com.google.cloud.videointelligence.v1.VideoSegment value) { + if (shotAnnotationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureShotAnnotationsIsMutable(); + shotAnnotations_.add(index, value); + onChanged(); + } else { + shotAnnotationsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public Builder addShotAnnotations( + com.google.cloud.videointelligence.v1.VideoSegment.Builder builderForValue) { + if (shotAnnotationsBuilder_ == null) { + ensureShotAnnotationsIsMutable(); + shotAnnotations_.add(builderForValue.build()); + onChanged(); + } else { + shotAnnotationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public Builder addShotAnnotations( + int index, com.google.cloud.videointelligence.v1.VideoSegment.Builder builderForValue) { + if (shotAnnotationsBuilder_ == null) { + ensureShotAnnotationsIsMutable(); + shotAnnotations_.add(index, builderForValue.build()); + onChanged(); + } else { + shotAnnotationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public Builder addAllShotAnnotations( + java.lang.Iterable values) { + if (shotAnnotationsBuilder_ == null) { + ensureShotAnnotationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, shotAnnotations_); + onChanged(); + } else { + shotAnnotationsBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public Builder clearShotAnnotations() { + if (shotAnnotationsBuilder_ == null) { + shotAnnotations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + shotAnnotationsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public Builder removeShotAnnotations(int index) { + if (shotAnnotationsBuilder_ == null) { + ensureShotAnnotationsIsMutable(); + shotAnnotations_.remove(index); + onChanged(); + } else { + shotAnnotationsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public com.google.cloud.videointelligence.v1.VideoSegment.Builder getShotAnnotationsBuilder( + int index) { + return getShotAnnotationsFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getShotAnnotationsOrBuilder( + int index) { + if (shotAnnotationsBuilder_ == null) { + return shotAnnotations_.get(index); + } else { + return shotAnnotationsBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public java.util.List + getShotAnnotationsOrBuilderList() { + if (shotAnnotationsBuilder_ != null) { + return shotAnnotationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(shotAnnotations_); + } + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public com.google.cloud.videointelligence.v1.VideoSegment.Builder addShotAnnotationsBuilder() { + return getShotAnnotationsFieldBuilder() + .addBuilder(com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance()); + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public com.google.cloud.videointelligence.v1.VideoSegment.Builder addShotAnnotationsBuilder( + int index) { + return getShotAnnotationsFieldBuilder() + .addBuilder( + index, com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance()); + } + /** + * + * + *
+     * Shot annotations. Each shot is represented as a video segment.
+     * 
+ * + * repeated .google.cloud.videointelligence.v1.VideoSegment shot_annotations = 6; + */ + public java.util.List + getShotAnnotationsBuilderList() { + return getShotAnnotationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1.VideoSegment, + com.google.cloud.videointelligence.v1.VideoSegment.Builder, + com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder> + getShotAnnotationsFieldBuilder() { + if (shotAnnotationsBuilder_ == null) { + shotAnnotationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1.VideoSegment, + com.google.cloud.videointelligence.v1.VideoSegment.Builder, + com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder>( + shotAnnotations_, + ((bitField0_ & 0x00000080) != 0), + getParentForChildren(), + isClean()); + shotAnnotations_ = null; + } + return shotAnnotationsBuilder_; + } + + private com.google.cloud.videointelligence.v1.ExplicitContentAnnotation explicitAnnotation_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1.ExplicitContentAnnotation, + com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.Builder, + com.google.cloud.videointelligence.v1.ExplicitContentAnnotationOrBuilder> + explicitAnnotationBuilder_; + /** + * + * + *
+     * Explicit content annotation.
+     * 
+ * + * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + * + * @return Whether the explicitAnnotation field is set. + */ + public boolean hasExplicitAnnotation() { + return explicitAnnotationBuilder_ != null || explicitAnnotation_ != null; + } + /** + * + * + *
+     * Explicit content annotation.
+     * 
+ * + * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + * + * @return The explicitAnnotation. + */ + public com.google.cloud.videointelligence.v1.ExplicitContentAnnotation getExplicitAnnotation() { + if (explicitAnnotationBuilder_ == null) { + return explicitAnnotation_ == null + ? com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.getDefaultInstance() + : explicitAnnotation_; + } else { + return explicitAnnotationBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Explicit content annotation.
+     * 
+ * + * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + */ + public Builder setExplicitAnnotation( + com.google.cloud.videointelligence.v1.ExplicitContentAnnotation value) { + if (explicitAnnotationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + explicitAnnotation_ = value; + onChanged(); + } else { + explicitAnnotationBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Explicit content annotation.
+     * 
+ * + * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + */ + public Builder setExplicitAnnotation( + com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.Builder builderForValue) { + if (explicitAnnotationBuilder_ == null) { + explicitAnnotation_ = builderForValue.build(); + onChanged(); + } else { + explicitAnnotationBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Explicit content annotation.
+     * 
+ * + * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + */ + public Builder mergeExplicitAnnotation( + com.google.cloud.videointelligence.v1.ExplicitContentAnnotation value) { + if (explicitAnnotationBuilder_ == null) { + if (explicitAnnotation_ != null) { + explicitAnnotation_ = + com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.newBuilder( + explicitAnnotation_) + .mergeFrom(value) + .buildPartial(); + } else { + explicitAnnotation_ = value; + } + onChanged(); + } else { + explicitAnnotationBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Explicit content annotation.
+     * 
+ * + * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + */ + public Builder clearExplicitAnnotation() { + if (explicitAnnotationBuilder_ == null) { + explicitAnnotation_ = null; + onChanged(); + } else { + explicitAnnotation_ = null; + explicitAnnotationBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Explicit content annotation.
+     * 
+ * + * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + */ + public com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.Builder + getExplicitAnnotationBuilder() { + + onChanged(); + return getExplicitAnnotationFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Explicit content annotation.
+     * 
+ * + * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + */ + public com.google.cloud.videointelligence.v1.ExplicitContentAnnotationOrBuilder + getExplicitAnnotationOrBuilder() { + if (explicitAnnotationBuilder_ != null) { + return explicitAnnotationBuilder_.getMessageOrBuilder(); + } else { + return explicitAnnotation_ == null + ? com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.getDefaultInstance() + : explicitAnnotation_; + } + } + /** + * + * + *
+     * Explicit content annotation.
+     * 
+ * + * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1.ExplicitContentAnnotation, + com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.Builder, + com.google.cloud.videointelligence.v1.ExplicitContentAnnotationOrBuilder> + getExplicitAnnotationFieldBuilder() { + if (explicitAnnotationBuilder_ == null) { + explicitAnnotationBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1.ExplicitContentAnnotation, + com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.Builder, + com.google.cloud.videointelligence.v1.ExplicitContentAnnotationOrBuilder>( + getExplicitAnnotation(), getParentForChildren(), isClean()); + explicitAnnotation_ = null; + } + return explicitAnnotationBuilder_; + } + + private java.util.List + speechTranscriptions_ = java.util.Collections.emptyList(); + + private void ensureSpeechTranscriptionsIsMutable() { + if (!((bitField0_ & 0x00000100) != 0)) { + speechTranscriptions_ = + new java.util.ArrayList( + speechTranscriptions_); + bitField0_ |= 0x00000100; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1.SpeechTranscription, + com.google.cloud.videointelligence.v1.SpeechTranscription.Builder, + com.google.cloud.videointelligence.v1.SpeechTranscriptionOrBuilder> + speechTranscriptionsBuilder_; + + /** + * + * + *
+     * Speech transcription.
+     * 
+ * + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * + */ + public java.util.List + getSpeechTranscriptionsList() { + if (speechTranscriptionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(speechTranscriptions_); + } else { + return speechTranscriptionsBuilder_.getMessageList(); + } + } /** * * *
-     * Explicit content annotation.
+     * Speech transcription.
      * 
* - * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; * + */ + public int getSpeechTranscriptionsCount() { + if (speechTranscriptionsBuilder_ == null) { + return speechTranscriptions_.size(); + } else { + return speechTranscriptionsBuilder_.getCount(); + } + } + /** * - * @return The explicitAnnotation. + * + *
+     * Speech transcription.
+     * 
+ * + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * */ - public com.google.cloud.videointelligence.v1.ExplicitContentAnnotation getExplicitAnnotation() { - if (explicitAnnotationBuilder_ == null) { - return explicitAnnotation_ == null - ? com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.getDefaultInstance() - : explicitAnnotation_; + public com.google.cloud.videointelligence.v1.SpeechTranscription getSpeechTranscriptions( + int index) { + if (speechTranscriptionsBuilder_ == null) { + return speechTranscriptions_.get(index); } else { - return explicitAnnotationBuilder_.getMessage(); + return speechTranscriptionsBuilder_.getMessage(index); } } /** * * *
-     * Explicit content annotation.
+     * Speech transcription.
      * 
* - * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; * */ - public Builder setExplicitAnnotation( - com.google.cloud.videointelligence.v1.ExplicitContentAnnotation value) { - if (explicitAnnotationBuilder_ == null) { + public Builder setSpeechTranscriptions( + int index, com.google.cloud.videointelligence.v1.SpeechTranscription value) { + if (speechTranscriptionsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - explicitAnnotation_ = value; + ensureSpeechTranscriptionsIsMutable(); + speechTranscriptions_.set(index, value); onChanged(); } else { - explicitAnnotationBuilder_.setMessage(value); + speechTranscriptionsBuilder_.setMessage(index, value); } - return this; } /** * * *
-     * Explicit content annotation.
+     * Speech transcription.
      * 
* - * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; * */ - public Builder setExplicitAnnotation( - com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.Builder builderForValue) { - if (explicitAnnotationBuilder_ == null) { - explicitAnnotation_ = builderForValue.build(); + public Builder setSpeechTranscriptions( + int index, + com.google.cloud.videointelligence.v1.SpeechTranscription.Builder builderForValue) { + if (speechTranscriptionsBuilder_ == null) { + ensureSpeechTranscriptionsIsMutable(); + speechTranscriptions_.set(index, builderForValue.build()); onChanged(); } else { - explicitAnnotationBuilder_.setMessage(builderForValue.build()); + speechTranscriptionsBuilder_.setMessage(index, builderForValue.build()); } - return this; } /** * * *
-     * Explicit content annotation.
+     * Speech transcription.
      * 
* - * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; * */ - public Builder mergeExplicitAnnotation( - com.google.cloud.videointelligence.v1.ExplicitContentAnnotation value) { - if (explicitAnnotationBuilder_ == null) { - if (explicitAnnotation_ != null) { - explicitAnnotation_ = - com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.newBuilder( - explicitAnnotation_) - .mergeFrom(value) - .buildPartial(); - } else { - explicitAnnotation_ = value; + public Builder addSpeechTranscriptions( + com.google.cloud.videointelligence.v1.SpeechTranscription value) { + if (speechTranscriptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSpeechTranscriptionsIsMutable(); + speechTranscriptions_.add(value); + onChanged(); + } else { + speechTranscriptionsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * Speech transcription.
+     * 
+ * + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * + */ + public Builder addSpeechTranscriptions( + int index, com.google.cloud.videointelligence.v1.SpeechTranscription value) { + if (speechTranscriptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureSpeechTranscriptionsIsMutable(); + speechTranscriptions_.add(index, value); + onChanged(); + } else { + speechTranscriptionsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Speech transcription.
+     * 
+ * + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * + */ + public Builder addSpeechTranscriptions( + com.google.cloud.videointelligence.v1.SpeechTranscription.Builder builderForValue) { + if (speechTranscriptionsBuilder_ == null) { + ensureSpeechTranscriptionsIsMutable(); + speechTranscriptions_.add(builderForValue.build()); + onChanged(); + } else { + speechTranscriptionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Speech transcription.
+     * 
+ * + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * + */ + public Builder addSpeechTranscriptions( + int index, + com.google.cloud.videointelligence.v1.SpeechTranscription.Builder builderForValue) { + if (speechTranscriptionsBuilder_ == null) { + ensureSpeechTranscriptionsIsMutable(); + speechTranscriptions_.add(index, builderForValue.build()); + onChanged(); + } else { + speechTranscriptionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Speech transcription.
+     * 
+ * + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * + */ + public Builder addAllSpeechTranscriptions( + java.lang.Iterable + values) { + if (speechTranscriptionsBuilder_ == null) { + ensureSpeechTranscriptionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, speechTranscriptions_); + onChanged(); + } else { + speechTranscriptionsBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * Speech transcription.
+     * 
+ * + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * + */ + public Builder clearSpeechTranscriptions() { + if (speechTranscriptionsBuilder_ == null) { + speechTranscriptions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + } else { + speechTranscriptionsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Speech transcription.
+     * 
+ * + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * + */ + public Builder removeSpeechTranscriptions(int index) { + if (speechTranscriptionsBuilder_ == null) { + ensureSpeechTranscriptionsIsMutable(); + speechTranscriptions_.remove(index); onChanged(); } else { - explicitAnnotationBuilder_.mergeFrom(value); + speechTranscriptionsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * Speech transcription.
+     * 
+ * + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * + */ + public com.google.cloud.videointelligence.v1.SpeechTranscription.Builder + getSpeechTranscriptionsBuilder(int index) { + return getSpeechTranscriptionsFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * Speech transcription.
+     * 
+ * + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * + */ + public com.google.cloud.videointelligence.v1.SpeechTranscriptionOrBuilder + getSpeechTranscriptionsOrBuilder(int index) { + if (speechTranscriptionsBuilder_ == null) { + return speechTranscriptions_.get(index); + } else { + return speechTranscriptionsBuilder_.getMessageOrBuilder(index); } - - return this; } /** * * *
-     * Explicit content annotation.
+     * Speech transcription.
      * 
* - * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; * */ - public Builder clearExplicitAnnotation() { - if (explicitAnnotationBuilder_ == null) { - explicitAnnotation_ = null; - onChanged(); + public java.util.List< + ? extends com.google.cloud.videointelligence.v1.SpeechTranscriptionOrBuilder> + getSpeechTranscriptionsOrBuilderList() { + if (speechTranscriptionsBuilder_ != null) { + return speechTranscriptionsBuilder_.getMessageOrBuilderList(); } else { - explicitAnnotation_ = null; - explicitAnnotationBuilder_ = null; + return java.util.Collections.unmodifiableList(speechTranscriptions_); } - - return this; } /** * * *
-     * Explicit content annotation.
+     * Speech transcription.
      * 
* - * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; * */ - public com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.Builder - getExplicitAnnotationBuilder() { - - onChanged(); - return getExplicitAnnotationFieldBuilder().getBuilder(); + public com.google.cloud.videointelligence.v1.SpeechTranscription.Builder + addSpeechTranscriptionsBuilder() { + return getSpeechTranscriptionsFieldBuilder() + .addBuilder( + com.google.cloud.videointelligence.v1.SpeechTranscription.getDefaultInstance()); } /** * * *
-     * Explicit content annotation.
+     * Speech transcription.
      * 
* - * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; * */ - public com.google.cloud.videointelligence.v1.ExplicitContentAnnotationOrBuilder - getExplicitAnnotationOrBuilder() { - if (explicitAnnotationBuilder_ != null) { - return explicitAnnotationBuilder_.getMessageOrBuilder(); - } else { - return explicitAnnotation_ == null - ? com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.getDefaultInstance() - : explicitAnnotation_; - } + public com.google.cloud.videointelligence.v1.SpeechTranscription.Builder + addSpeechTranscriptionsBuilder(int index) { + return getSpeechTranscriptionsFieldBuilder() + .addBuilder( + index, + com.google.cloud.videointelligence.v1.SpeechTranscription.getDefaultInstance()); } /** * * *
-     * Explicit content annotation.
+     * Speech transcription.
      * 
* - * .google.cloud.videointelligence.v1.ExplicitContentAnnotation explicit_annotation = 7; + * + * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; * */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.videointelligence.v1.ExplicitContentAnnotation, - com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.Builder, - com.google.cloud.videointelligence.v1.ExplicitContentAnnotationOrBuilder> - getExplicitAnnotationFieldBuilder() { - if (explicitAnnotationBuilder_ == null) { - explicitAnnotationBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.videointelligence.v1.ExplicitContentAnnotation, - com.google.cloud.videointelligence.v1.ExplicitContentAnnotation.Builder, - com.google.cloud.videointelligence.v1.ExplicitContentAnnotationOrBuilder>( - getExplicitAnnotation(), getParentForChildren(), isClean()); - explicitAnnotation_ = null; + public java.util.List + getSpeechTranscriptionsBuilderList() { + return getSpeechTranscriptionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1.SpeechTranscription, + com.google.cloud.videointelligence.v1.SpeechTranscription.Builder, + com.google.cloud.videointelligence.v1.SpeechTranscriptionOrBuilder> + getSpeechTranscriptionsFieldBuilder() { + if (speechTranscriptionsBuilder_ == null) { + speechTranscriptionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.videointelligence.v1.SpeechTranscription, + com.google.cloud.videointelligence.v1.SpeechTranscription.Builder, + com.google.cloud.videointelligence.v1.SpeechTranscriptionOrBuilder>( + speechTranscriptions_, + ((bitField0_ & 0x00000100) != 0), + getParentForChildren(), + isClean()); + speechTranscriptions_ = null; } - return explicitAnnotationBuilder_; + return speechTranscriptionsBuilder_; } - private java.util.List - speechTranscriptions_ = 
java.util.Collections.emptyList(); + private java.util.List textAnnotations_ = + java.util.Collections.emptyList(); - private void ensureSpeechTranscriptionsIsMutable() { - if (!((bitField0_ & 0x00000080) != 0)) { - speechTranscriptions_ = - new java.util.ArrayList( - speechTranscriptions_); - bitField0_ |= 0x00000080; + private void ensureTextAnnotationsIsMutable() { + if (!((bitField0_ & 0x00000200) != 0)) { + textAnnotations_ = + new java.util.ArrayList( + textAnnotations_); + bitField0_ |= 0x00000200; } } private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.SpeechTranscription, - com.google.cloud.videointelligence.v1.SpeechTranscription.Builder, - com.google.cloud.videointelligence.v1.SpeechTranscriptionOrBuilder> - speechTranscriptionsBuilder_; + com.google.cloud.videointelligence.v1.TextAnnotation, + com.google.cloud.videointelligence.v1.TextAnnotation.Builder, + com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder> + textAnnotationsBuilder_; /** * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public java.util.List - getSpeechTranscriptionsList() { - if (speechTranscriptionsBuilder_ == null) { - return java.util.Collections.unmodifiableList(speechTranscriptions_); + public java.util.List + getTextAnnotationsList() { + if (textAnnotationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(textAnnotations_); } else { - return speechTranscriptionsBuilder_.getMessageList(); + return textAnnotationsBuilder_.getMessageList(); } } /** * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public int getSpeechTranscriptionsCount() { - if (speechTranscriptionsBuilder_ == null) { - return speechTranscriptions_.size(); + public int getTextAnnotationsCount() { + if (textAnnotationsBuilder_ == null) { + return textAnnotations_.size(); } else { - return speechTranscriptionsBuilder_.getCount(); + return textAnnotationsBuilder_.getCount(); } } /** * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public com.google.cloud.videointelligence.v1.SpeechTranscription getSpeechTranscriptions( - int index) { - if (speechTranscriptionsBuilder_ == null) { - return speechTranscriptions_.get(index); + public com.google.cloud.videointelligence.v1.TextAnnotation getTextAnnotations(int index) { + if (textAnnotationsBuilder_ == null) { + return textAnnotations_.get(index); } else { - return speechTranscriptionsBuilder_.getMessage(index); + return textAnnotationsBuilder_.getMessage(index); } } /** * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public Builder setSpeechTranscriptions( - int index, com.google.cloud.videointelligence.v1.SpeechTranscription value) { - if (speechTranscriptionsBuilder_ == null) { + public Builder setTextAnnotations( + int index, com.google.cloud.videointelligence.v1.TextAnnotation value) { + if (textAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureSpeechTranscriptionsIsMutable(); - speechTranscriptions_.set(index, value); + ensureTextAnnotationsIsMutable(); + textAnnotations_.set(index, value); onChanged(); } else { - speechTranscriptionsBuilder_.setMessage(index, value); + textAnnotationsBuilder_.setMessage(index, value); } return this; } @@ -6059,22 +7267,22 @@ public Builder setSpeechTranscriptions( * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public Builder setSpeechTranscriptions( - int index, - com.google.cloud.videointelligence.v1.SpeechTranscription.Builder builderForValue) { - if (speechTranscriptionsBuilder_ == null) { - ensureSpeechTranscriptionsIsMutable(); - speechTranscriptions_.set(index, builderForValue.build()); + public Builder setTextAnnotations( + int index, com.google.cloud.videointelligence.v1.TextAnnotation.Builder builderForValue) { + if (textAnnotationsBuilder_ == null) { + ensureTextAnnotationsIsMutable(); + textAnnotations_.set(index, builderForValue.build()); onChanged(); } else { - speechTranscriptionsBuilder_.setMessage(index, builderForValue.build()); + textAnnotationsBuilder_.setMessage(index, builderForValue.build()); } return this; } @@ -6082,24 +7290,24 @@ public Builder setSpeechTranscriptions( * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public Builder addSpeechTranscriptions( - com.google.cloud.videointelligence.v1.SpeechTranscription value) { - if (speechTranscriptionsBuilder_ == null) { + public Builder addTextAnnotations(com.google.cloud.videointelligence.v1.TextAnnotation value) { + if (textAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureSpeechTranscriptionsIsMutable(); - speechTranscriptions_.add(value); + ensureTextAnnotationsIsMutable(); + textAnnotations_.add(value); onChanged(); } else { - speechTranscriptionsBuilder_.addMessage(value); + textAnnotationsBuilder_.addMessage(value); } return this; } @@ -6107,24 +7315,25 @@ public Builder addSpeechTranscriptions( * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public Builder addSpeechTranscriptions( - int index, com.google.cloud.videointelligence.v1.SpeechTranscription value) { - if (speechTranscriptionsBuilder_ == null) { + public Builder addTextAnnotations( + int index, com.google.cloud.videointelligence.v1.TextAnnotation value) { + if (textAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureSpeechTranscriptionsIsMutable(); - speechTranscriptions_.add(index, value); + ensureTextAnnotationsIsMutable(); + textAnnotations_.add(index, value); onChanged(); } else { - speechTranscriptionsBuilder_.addMessage(index, value); + textAnnotationsBuilder_.addMessage(index, value); } return this; } @@ -6132,21 +7341,22 @@ public Builder addSpeechTranscriptions( * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public Builder addSpeechTranscriptions( - com.google.cloud.videointelligence.v1.SpeechTranscription.Builder builderForValue) { - if (speechTranscriptionsBuilder_ == null) { - ensureSpeechTranscriptionsIsMutable(); - speechTranscriptions_.add(builderForValue.build()); + public Builder addTextAnnotations( + com.google.cloud.videointelligence.v1.TextAnnotation.Builder builderForValue) { + if (textAnnotationsBuilder_ == null) { + ensureTextAnnotationsIsMutable(); + textAnnotations_.add(builderForValue.build()); onChanged(); } else { - speechTranscriptionsBuilder_.addMessage(builderForValue.build()); + textAnnotationsBuilder_.addMessage(builderForValue.build()); } return this; } @@ -6154,22 +7364,22 @@ public Builder addSpeechTranscriptions( * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public Builder addSpeechTranscriptions( - int index, - com.google.cloud.videointelligence.v1.SpeechTranscription.Builder builderForValue) { - if (speechTranscriptionsBuilder_ == null) { - ensureSpeechTranscriptionsIsMutable(); - speechTranscriptions_.add(index, builderForValue.build()); + public Builder addTextAnnotations( + int index, com.google.cloud.videointelligence.v1.TextAnnotation.Builder builderForValue) { + if (textAnnotationsBuilder_ == null) { + ensureTextAnnotationsIsMutable(); + textAnnotations_.add(index, builderForValue.build()); onChanged(); } else { - speechTranscriptionsBuilder_.addMessage(index, builderForValue.build()); + textAnnotationsBuilder_.addMessage(index, builderForValue.build()); } return this; } @@ -6177,22 +7387,22 @@ public Builder addSpeechTranscriptions( * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public Builder addAllSpeechTranscriptions( - java.lang.Iterable - values) { - if (speechTranscriptionsBuilder_ == null) { - ensureSpeechTranscriptionsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, speechTranscriptions_); + public Builder addAllTextAnnotations( + java.lang.Iterable values) { + if (textAnnotationsBuilder_ == null) { + ensureTextAnnotationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, textAnnotations_); onChanged(); } else { - speechTranscriptionsBuilder_.addAllMessages(values); + textAnnotationsBuilder_.addAllMessages(values); } return this; } @@ -6200,20 +7410,21 @@ public Builder addAllSpeechTranscriptions( * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public Builder clearSpeechTranscriptions() { - if (speechTranscriptionsBuilder_ == null) { - speechTranscriptions_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000080); + public Builder clearTextAnnotations() { + if (textAnnotationsBuilder_ == null) { + textAnnotations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); onChanged(); } else { - speechTranscriptionsBuilder_.clear(); + textAnnotationsBuilder_.clear(); } return this; } @@ -6221,20 +7432,21 @@ public Builder clearSpeechTranscriptions() { * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public Builder removeSpeechTranscriptions(int index) { - if (speechTranscriptionsBuilder_ == null) { - ensureSpeechTranscriptionsIsMutable(); - speechTranscriptions_.remove(index); + public Builder removeTextAnnotations(int index) { + if (textAnnotationsBuilder_ == null) { + ensureTextAnnotationsIsMutable(); + textAnnotations_.remove(index); onChanged(); } else { - speechTranscriptionsBuilder_.remove(index); + textAnnotationsBuilder_.remove(index); } return this; } @@ -6242,226 +7454,226 @@ public Builder removeSpeechTranscriptions(int index) { * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public com.google.cloud.videointelligence.v1.SpeechTranscription.Builder - getSpeechTranscriptionsBuilder(int index) { - return getSpeechTranscriptionsFieldBuilder().getBuilder(index); + public com.google.cloud.videointelligence.v1.TextAnnotation.Builder getTextAnnotationsBuilder( + int index) { + return getTextAnnotationsFieldBuilder().getBuilder(index); } /** * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public com.google.cloud.videointelligence.v1.SpeechTranscriptionOrBuilder - getSpeechTranscriptionsOrBuilder(int index) { - if (speechTranscriptionsBuilder_ == null) { - return speechTranscriptions_.get(index); + public com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder + getTextAnnotationsOrBuilder(int index) { + if (textAnnotationsBuilder_ == null) { + return textAnnotations_.get(index); } else { - return speechTranscriptionsBuilder_.getMessageOrBuilder(index); + return textAnnotationsBuilder_.getMessageOrBuilder(index); } } /** * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public java.util.List< - ? extends com.google.cloud.videointelligence.v1.SpeechTranscriptionOrBuilder> - getSpeechTranscriptionsOrBuilderList() { - if (speechTranscriptionsBuilder_ != null) { - return speechTranscriptionsBuilder_.getMessageOrBuilderList(); + public java.util.List + getTextAnnotationsOrBuilderList() { + if (textAnnotationsBuilder_ != null) { + return textAnnotationsBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(speechTranscriptions_); + return java.util.Collections.unmodifiableList(textAnnotations_); } } /** * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public com.google.cloud.videointelligence.v1.SpeechTranscription.Builder - addSpeechTranscriptionsBuilder() { - return getSpeechTranscriptionsFieldBuilder() - .addBuilder( - com.google.cloud.videointelligence.v1.SpeechTranscription.getDefaultInstance()); + public com.google.cloud.videointelligence.v1.TextAnnotation.Builder + addTextAnnotationsBuilder() { + return getTextAnnotationsFieldBuilder() + .addBuilder(com.google.cloud.videointelligence.v1.TextAnnotation.getDefaultInstance()); } /** * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public com.google.cloud.videointelligence.v1.SpeechTranscription.Builder - addSpeechTranscriptionsBuilder(int index) { - return getSpeechTranscriptionsFieldBuilder() + public com.google.cloud.videointelligence.v1.TextAnnotation.Builder addTextAnnotationsBuilder( + int index) { + return getTextAnnotationsFieldBuilder() .addBuilder( - index, - com.google.cloud.videointelligence.v1.SpeechTranscription.getDefaultInstance()); + index, com.google.cloud.videointelligence.v1.TextAnnotation.getDefaultInstance()); } /** * * *
-     * Speech transcription.
+     * OCR text detection and tracking.
+     * Annotations for list of detected text snippets. Each will have list of
+     * frame information associated with it.
      * 
* - * - * repeated .google.cloud.videointelligence.v1.SpeechTranscription speech_transcriptions = 11; + * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; * */ - public java.util.List - getSpeechTranscriptionsBuilderList() { - return getSpeechTranscriptionsFieldBuilder().getBuilderList(); + public java.util.List + getTextAnnotationsBuilderList() { + return getTextAnnotationsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.SpeechTranscription, - com.google.cloud.videointelligence.v1.SpeechTranscription.Builder, - com.google.cloud.videointelligence.v1.SpeechTranscriptionOrBuilder> - getSpeechTranscriptionsFieldBuilder() { - if (speechTranscriptionsBuilder_ == null) { - speechTranscriptionsBuilder_ = + com.google.cloud.videointelligence.v1.TextAnnotation, + com.google.cloud.videointelligence.v1.TextAnnotation.Builder, + com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder> + getTextAnnotationsFieldBuilder() { + if (textAnnotationsBuilder_ == null) { + textAnnotationsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.SpeechTranscription, - com.google.cloud.videointelligence.v1.SpeechTranscription.Builder, - com.google.cloud.videointelligence.v1.SpeechTranscriptionOrBuilder>( - speechTranscriptions_, - ((bitField0_ & 0x00000080) != 0), + com.google.cloud.videointelligence.v1.TextAnnotation, + com.google.cloud.videointelligence.v1.TextAnnotation.Builder, + com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder>( + textAnnotations_, + ((bitField0_ & 0x00000200) != 0), getParentForChildren(), isClean()); - speechTranscriptions_ = null; + textAnnotations_ = null; } - return speechTranscriptionsBuilder_; + return textAnnotationsBuilder_; } - private java.util.List textAnnotations_ = - java.util.Collections.emptyList(); + private java.util.List + objectAnnotations_ = java.util.Collections.emptyList(); - 
private void ensureTextAnnotationsIsMutable() { - if (!((bitField0_ & 0x00000100) != 0)) { - textAnnotations_ = - new java.util.ArrayList( - textAnnotations_); - bitField0_ |= 0x00000100; + private void ensureObjectAnnotationsIsMutable() { + if (!((bitField0_ & 0x00000400) != 0)) { + objectAnnotations_ = + new java.util.ArrayList( + objectAnnotations_); + bitField0_ |= 0x00000400; } } private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.TextAnnotation, - com.google.cloud.videointelligence.v1.TextAnnotation.Builder, - com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder> - textAnnotationsBuilder_; + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation, + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder, + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder> + objectAnnotationsBuilder_; /** * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public java.util.List - getTextAnnotationsList() { - if (textAnnotationsBuilder_ == null) { - return java.util.Collections.unmodifiableList(textAnnotations_); + public java.util.List + getObjectAnnotationsList() { + if (objectAnnotationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(objectAnnotations_); } else { - return textAnnotationsBuilder_.getMessageList(); + return objectAnnotationsBuilder_.getMessageList(); } } /** * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public int getTextAnnotationsCount() { - if (textAnnotationsBuilder_ == null) { - return textAnnotations_.size(); + public int getObjectAnnotationsCount() { + if (objectAnnotationsBuilder_ == null) { + return objectAnnotations_.size(); } else { - return textAnnotationsBuilder_.getCount(); + return objectAnnotationsBuilder_.getCount(); } } /** * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public com.google.cloud.videointelligence.v1.TextAnnotation getTextAnnotations(int index) { - if (textAnnotationsBuilder_ == null) { - return textAnnotations_.get(index); + public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation getObjectAnnotations( + int index) { + if (objectAnnotationsBuilder_ == null) { + return objectAnnotations_.get(index); } else { - return textAnnotationsBuilder_.getMessage(index); + return objectAnnotationsBuilder_.getMessage(index); } } /** * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public Builder setTextAnnotations( - int index, com.google.cloud.videointelligence.v1.TextAnnotation value) { - if (textAnnotationsBuilder_ == null) { + public Builder setObjectAnnotations( + int index, com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation value) { + if (objectAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureTextAnnotationsIsMutable(); - textAnnotations_.set(index, value); + ensureObjectAnnotationsIsMutable(); + objectAnnotations_.set(index, value); onChanged(); } else { - textAnnotationsBuilder_.setMessage(index, value); + objectAnnotationsBuilder_.setMessage(index, value); } return this; } @@ -6469,22 +7681,22 @@ public Builder setTextAnnotations( * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public Builder setTextAnnotations( - int index, com.google.cloud.videointelligence.v1.TextAnnotation.Builder builderForValue) { - if (textAnnotationsBuilder_ == null) { - ensureTextAnnotationsIsMutable(); - textAnnotations_.set(index, builderForValue.build()); + public Builder setObjectAnnotations( + int index, + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder builderForValue) { + if (objectAnnotationsBuilder_ == null) { + ensureObjectAnnotationsIsMutable(); + objectAnnotations_.set(index, builderForValue.build()); onChanged(); } else { - textAnnotationsBuilder_.setMessage(index, builderForValue.build()); + objectAnnotationsBuilder_.setMessage(index, builderForValue.build()); } return this; } @@ -6492,24 +7704,24 @@ public Builder setTextAnnotations( * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public Builder addTextAnnotations(com.google.cloud.videointelligence.v1.TextAnnotation value) { - if (textAnnotationsBuilder_ == null) { + public Builder addObjectAnnotations( + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation value) { + if (objectAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureTextAnnotationsIsMutable(); - textAnnotations_.add(value); + ensureObjectAnnotationsIsMutable(); + objectAnnotations_.add(value); onChanged(); } else { - textAnnotationsBuilder_.addMessage(value); + objectAnnotationsBuilder_.addMessage(value); } return this; } @@ -6517,25 +7729,24 @@ public Builder addTextAnnotations(com.google.cloud.videointelligence.v1.TextAnno * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public Builder addTextAnnotations( - int index, com.google.cloud.videointelligence.v1.TextAnnotation value) { - if (textAnnotationsBuilder_ == null) { + public Builder addObjectAnnotations( + int index, com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation value) { + if (objectAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureTextAnnotationsIsMutable(); - textAnnotations_.add(index, value); + ensureObjectAnnotationsIsMutable(); + objectAnnotations_.add(index, value); onChanged(); } else { - textAnnotationsBuilder_.addMessage(index, value); + objectAnnotationsBuilder_.addMessage(index, value); } return this; } @@ -6543,22 +7754,21 @@ public Builder addTextAnnotations( * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public Builder addTextAnnotations( - com.google.cloud.videointelligence.v1.TextAnnotation.Builder builderForValue) { - if (textAnnotationsBuilder_ == null) { - ensureTextAnnotationsIsMutable(); - textAnnotations_.add(builderForValue.build()); + public Builder addObjectAnnotations( + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder builderForValue) { + if (objectAnnotationsBuilder_ == null) { + ensureObjectAnnotationsIsMutable(); + objectAnnotations_.add(builderForValue.build()); onChanged(); } else { - textAnnotationsBuilder_.addMessage(builderForValue.build()); + objectAnnotationsBuilder_.addMessage(builderForValue.build()); } return this; } @@ -6566,22 +7776,22 @@ public Builder addTextAnnotations( * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public Builder addTextAnnotations( - int index, com.google.cloud.videointelligence.v1.TextAnnotation.Builder builderForValue) { - if (textAnnotationsBuilder_ == null) { - ensureTextAnnotationsIsMutable(); - textAnnotations_.add(index, builderForValue.build()); + public Builder addObjectAnnotations( + int index, + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder builderForValue) { + if (objectAnnotationsBuilder_ == null) { + ensureObjectAnnotationsIsMutable(); + objectAnnotations_.add(index, builderForValue.build()); onChanged(); } else { - textAnnotationsBuilder_.addMessage(index, builderForValue.build()); + objectAnnotationsBuilder_.addMessage(index, builderForValue.build()); } return this; } @@ -6589,22 +7799,22 @@ public Builder addTextAnnotations( * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public Builder addAllTextAnnotations( - java.lang.Iterable values) { - if (textAnnotationsBuilder_ == null) { - ensureTextAnnotationsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, textAnnotations_); + public Builder addAllObjectAnnotations( + java.lang.Iterable + values) { + if (objectAnnotationsBuilder_ == null) { + ensureObjectAnnotationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, objectAnnotations_); onChanged(); } else { - textAnnotationsBuilder_.addAllMessages(values); + objectAnnotationsBuilder_.addAllMessages(values); } return this; } @@ -6612,21 +7822,20 @@ public Builder addAllTextAnnotations( * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public Builder clearTextAnnotations() { - if (textAnnotationsBuilder_ == null) { - textAnnotations_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000100); + public Builder clearObjectAnnotations() { + if (objectAnnotationsBuilder_ == null) { + objectAnnotations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000400); onChanged(); } else { - textAnnotationsBuilder_.clear(); + objectAnnotationsBuilder_.clear(); } return this; } @@ -6634,21 +7843,20 @@ public Builder clearTextAnnotations() { * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public Builder removeTextAnnotations(int index) { - if (textAnnotationsBuilder_ == null) { - ensureTextAnnotationsIsMutable(); - textAnnotations_.remove(index); + public Builder removeObjectAnnotations(int index) { + if (objectAnnotationsBuilder_ == null) { + ensureObjectAnnotationsIsMutable(); + objectAnnotations_.remove(index); onChanged(); } else { - textAnnotationsBuilder_.remove(index); + objectAnnotationsBuilder_.remove(index); } return this; } @@ -6656,226 +7864,224 @@ public Builder removeTextAnnotations(int index) { * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public com.google.cloud.videointelligence.v1.TextAnnotation.Builder getTextAnnotationsBuilder( - int index) { - return getTextAnnotationsFieldBuilder().getBuilder(index); + public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder + getObjectAnnotationsBuilder(int index) { + return getObjectAnnotationsFieldBuilder().getBuilder(index); } /** * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder - getTextAnnotationsOrBuilder(int index) { - if (textAnnotationsBuilder_ == null) { - return textAnnotations_.get(index); + public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder + getObjectAnnotationsOrBuilder(int index) { + if (objectAnnotationsBuilder_ == null) { + return objectAnnotations_.get(index); } else { - return textAnnotationsBuilder_.getMessageOrBuilder(index); + return objectAnnotationsBuilder_.getMessageOrBuilder(index); } } /** * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public java.util.List - getTextAnnotationsOrBuilderList() { - if (textAnnotationsBuilder_ != null) { - return textAnnotationsBuilder_.getMessageOrBuilderList(); + public java.util.List< + ? extends com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder> + getObjectAnnotationsOrBuilderList() { + if (objectAnnotationsBuilder_ != null) { + return objectAnnotationsBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(textAnnotations_); + return java.util.Collections.unmodifiableList(objectAnnotations_); } } /** * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public com.google.cloud.videointelligence.v1.TextAnnotation.Builder - addTextAnnotationsBuilder() { - return getTextAnnotationsFieldBuilder() - .addBuilder(com.google.cloud.videointelligence.v1.TextAnnotation.getDefaultInstance()); + public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder + addObjectAnnotationsBuilder() { + return getObjectAnnotationsFieldBuilder() + .addBuilder( + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.getDefaultInstance()); } /** * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public com.google.cloud.videointelligence.v1.TextAnnotation.Builder addTextAnnotationsBuilder( - int index) { - return getTextAnnotationsFieldBuilder() + public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder + addObjectAnnotationsBuilder(int index) { + return getObjectAnnotationsFieldBuilder() .addBuilder( - index, com.google.cloud.videointelligence.v1.TextAnnotation.getDefaultInstance()); + index, + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.getDefaultInstance()); } /** * * *
-     * OCR text detection and tracking.
-     * Annotations for list of detected text snippets. Each will have list of
-     * frame information associated with it.
+     * Annotations for list of objects detected and tracked in video.
      * 
* - * repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12; + * + * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; * */ - public java.util.List - getTextAnnotationsBuilderList() { - return getTextAnnotationsFieldBuilder().getBuilderList(); + public java.util.List + getObjectAnnotationsBuilderList() { + return getObjectAnnotationsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.TextAnnotation, - com.google.cloud.videointelligence.v1.TextAnnotation.Builder, - com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder> - getTextAnnotationsFieldBuilder() { - if (textAnnotationsBuilder_ == null) { - textAnnotationsBuilder_ = + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation, + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder, + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder> + getObjectAnnotationsFieldBuilder() { + if (objectAnnotationsBuilder_ == null) { + objectAnnotationsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.TextAnnotation, - com.google.cloud.videointelligence.v1.TextAnnotation.Builder, - com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder>( - textAnnotations_, - ((bitField0_ & 0x00000100) != 0), + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation, + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder, + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder>( + objectAnnotations_, + ((bitField0_ & 0x00000400) != 0), getParentForChildren(), isClean()); - textAnnotations_ = null; + objectAnnotations_ = null; } - return textAnnotationsBuilder_; + return objectAnnotationsBuilder_; } - private java.util.List - objectAnnotations_ = java.util.Collections.emptyList(); + private java.util.List + logoRecognitionAnnotations_ = 
java.util.Collections.emptyList(); - private void ensureObjectAnnotationsIsMutable() { - if (!((bitField0_ & 0x00000200) != 0)) { - objectAnnotations_ = - new java.util.ArrayList( - objectAnnotations_); - bitField0_ |= 0x00000200; + private void ensureLogoRecognitionAnnotationsIsMutable() { + if (!((bitField0_ & 0x00000800) != 0)) { + logoRecognitionAnnotations_ = + new java.util.ArrayList< + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation>( + logoRecognitionAnnotations_); + bitField0_ |= 0x00000800; } } private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation, - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder, - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder> - objectAnnotationsBuilder_; + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation, + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder, + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder> + logoRecognitionAnnotationsBuilder_; /** * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public java.util.List - getObjectAnnotationsList() { - if (objectAnnotationsBuilder_ == null) { - return java.util.Collections.unmodifiableList(objectAnnotations_); + public java.util.List + getLogoRecognitionAnnotationsList() { + if (logoRecognitionAnnotationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(logoRecognitionAnnotations_); } else { - return objectAnnotationsBuilder_.getMessageList(); + return logoRecognitionAnnotationsBuilder_.getMessageList(); } } /** * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public int getObjectAnnotationsCount() { - if (objectAnnotationsBuilder_ == null) { - return objectAnnotations_.size(); + public int getLogoRecognitionAnnotationsCount() { + if (logoRecognitionAnnotationsBuilder_ == null) { + return logoRecognitionAnnotations_.size(); } else { - return objectAnnotationsBuilder_.getCount(); + return logoRecognitionAnnotationsBuilder_.getCount(); } } /** * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation getObjectAnnotations( - int index) { - if (objectAnnotationsBuilder_ == null) { - return objectAnnotations_.get(index); + public com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation + getLogoRecognitionAnnotations(int index) { + if (logoRecognitionAnnotationsBuilder_ == null) { + return logoRecognitionAnnotations_.get(index); } else { - return objectAnnotationsBuilder_.getMessage(index); + return logoRecognitionAnnotationsBuilder_.getMessage(index); } } /** * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public Builder setObjectAnnotations( - int index, com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation value) { - if (objectAnnotationsBuilder_ == null) { + public Builder setLogoRecognitionAnnotations( + int index, com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation value) { + if (logoRecognitionAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureObjectAnnotationsIsMutable(); - objectAnnotations_.set(index, value); + ensureLogoRecognitionAnnotationsIsMutable(); + logoRecognitionAnnotations_.set(index, value); onChanged(); } else { - objectAnnotationsBuilder_.setMessage(index, value); + logoRecognitionAnnotationsBuilder_.setMessage(index, value); } return this; } @@ -6883,22 +8089,22 @@ public Builder setObjectAnnotations( * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public Builder setObjectAnnotations( + public Builder setLogoRecognitionAnnotations( int index, - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder builderForValue) { - if (objectAnnotationsBuilder_ == null) { - ensureObjectAnnotationsIsMutable(); - objectAnnotations_.set(index, builderForValue.build()); + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder builderForValue) { + if (logoRecognitionAnnotationsBuilder_ == null) { + ensureLogoRecognitionAnnotationsIsMutable(); + logoRecognitionAnnotations_.set(index, builderForValue.build()); onChanged(); } else { - objectAnnotationsBuilder_.setMessage(index, builderForValue.build()); + logoRecognitionAnnotationsBuilder_.setMessage(index, builderForValue.build()); } return this; } @@ -6906,24 +8112,24 @@ public Builder setObjectAnnotations( * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public Builder addObjectAnnotations( - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation value) { - if (objectAnnotationsBuilder_ == null) { + public Builder addLogoRecognitionAnnotations( + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation value) { + if (logoRecognitionAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureObjectAnnotationsIsMutable(); - objectAnnotations_.add(value); + ensureLogoRecognitionAnnotationsIsMutable(); + logoRecognitionAnnotations_.add(value); onChanged(); } else { - objectAnnotationsBuilder_.addMessage(value); + logoRecognitionAnnotationsBuilder_.addMessage(value); } return this; } @@ -6931,24 +8137,24 @@ public Builder addObjectAnnotations( * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public Builder addObjectAnnotations( - int index, com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation value) { - if (objectAnnotationsBuilder_ == null) { + public Builder addLogoRecognitionAnnotations( + int index, com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation value) { + if (logoRecognitionAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureObjectAnnotationsIsMutable(); - objectAnnotations_.add(index, value); + ensureLogoRecognitionAnnotationsIsMutable(); + logoRecognitionAnnotations_.add(index, value); onChanged(); } else { - objectAnnotationsBuilder_.addMessage(index, value); + logoRecognitionAnnotationsBuilder_.addMessage(index, value); } return this; } @@ -6956,21 +8162,21 @@ public Builder addObjectAnnotations( * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public Builder addObjectAnnotations( - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder builderForValue) { - if (objectAnnotationsBuilder_ == null) { - ensureObjectAnnotationsIsMutable(); - objectAnnotations_.add(builderForValue.build()); + public Builder addLogoRecognitionAnnotations( + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder builderForValue) { + if (logoRecognitionAnnotationsBuilder_ == null) { + ensureLogoRecognitionAnnotationsIsMutable(); + logoRecognitionAnnotations_.add(builderForValue.build()); onChanged(); } else { - objectAnnotationsBuilder_.addMessage(builderForValue.build()); + logoRecognitionAnnotationsBuilder_.addMessage(builderForValue.build()); } return this; } @@ -6978,22 +8184,22 @@ public Builder addObjectAnnotations( * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public Builder addObjectAnnotations( + public Builder addLogoRecognitionAnnotations( int index, - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder builderForValue) { - if (objectAnnotationsBuilder_ == null) { - ensureObjectAnnotationsIsMutable(); - objectAnnotations_.add(index, builderForValue.build()); + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder builderForValue) { + if (logoRecognitionAnnotationsBuilder_ == null) { + ensureLogoRecognitionAnnotationsIsMutable(); + logoRecognitionAnnotations_.add(index, builderForValue.build()); onChanged(); } else { - objectAnnotationsBuilder_.addMessage(index, builderForValue.build()); + logoRecognitionAnnotationsBuilder_.addMessage(index, builderForValue.build()); } return this; } @@ -7001,22 +8207,23 @@ public Builder addObjectAnnotations( * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public Builder addAllObjectAnnotations( - java.lang.Iterable + public Builder addAllLogoRecognitionAnnotations( + java.lang.Iterable< + ? extends com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation> values) { - if (objectAnnotationsBuilder_ == null) { - ensureObjectAnnotationsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, objectAnnotations_); + if (logoRecognitionAnnotationsBuilder_ == null) { + ensureLogoRecognitionAnnotationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, logoRecognitionAnnotations_); onChanged(); } else { - objectAnnotationsBuilder_.addAllMessages(values); + logoRecognitionAnnotationsBuilder_.addAllMessages(values); } return this; } @@ -7024,20 +8231,20 @@ public Builder addAllObjectAnnotations( * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public Builder clearObjectAnnotations() { - if (objectAnnotationsBuilder_ == null) { - objectAnnotations_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000200); + public Builder clearLogoRecognitionAnnotations() { + if (logoRecognitionAnnotationsBuilder_ == null) { + logoRecognitionAnnotations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000800); onChanged(); } else { - objectAnnotationsBuilder_.clear(); + logoRecognitionAnnotationsBuilder_.clear(); } return this; } @@ -7045,20 +8252,20 @@ public Builder clearObjectAnnotations() { * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; - * - */ - public Builder removeObjectAnnotations(int index) { - if (objectAnnotationsBuilder_ == null) { - ensureObjectAnnotationsIsMutable(); - objectAnnotations_.remove(index); + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + *
+ */ + public Builder removeLogoRecognitionAnnotations(int index) { + if (logoRecognitionAnnotationsBuilder_ == null) { + ensureLogoRecognitionAnnotationsIsMutable(); + logoRecognitionAnnotations_.remove(index); onChanged(); } else { - objectAnnotationsBuilder_.remove(index); + logoRecognitionAnnotationsBuilder_.remove(index); } return this; } @@ -7066,224 +8273,224 @@ public Builder removeObjectAnnotations(int index) { * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder - getObjectAnnotationsBuilder(int index) { - return getObjectAnnotationsFieldBuilder().getBuilder(index); + public com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder + getLogoRecognitionAnnotationsBuilder(int index) { + return getLogoRecognitionAnnotationsFieldBuilder().getBuilder(index); } /** * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder - getObjectAnnotationsOrBuilder(int index) { - if (objectAnnotationsBuilder_ == null) { - return objectAnnotations_.get(index); + public com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder + getLogoRecognitionAnnotationsOrBuilder(int index) { + if (logoRecognitionAnnotationsBuilder_ == null) { + return logoRecognitionAnnotations_.get(index); } else { - return objectAnnotationsBuilder_.getMessageOrBuilder(index); + return logoRecognitionAnnotationsBuilder_.getMessageOrBuilder(index); } } /** * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ public java.util.List< - ? extends com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder> - getObjectAnnotationsOrBuilderList() { - if (objectAnnotationsBuilder_ != null) { - return objectAnnotationsBuilder_.getMessageOrBuilderList(); + ? extends com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder> + getLogoRecognitionAnnotationsOrBuilderList() { + if (logoRecognitionAnnotationsBuilder_ != null) { + return logoRecognitionAnnotationsBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(objectAnnotations_); + return java.util.Collections.unmodifiableList(logoRecognitionAnnotations_); } } /** * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder - addObjectAnnotationsBuilder() { - return getObjectAnnotationsFieldBuilder() + public com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder + addLogoRecognitionAnnotationsBuilder() { + return getLogoRecognitionAnnotationsFieldBuilder() .addBuilder( - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.getDefaultInstance()); + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.getDefaultInstance()); } /** * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder - addObjectAnnotationsBuilder(int index) { - return getObjectAnnotationsFieldBuilder() + public com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder + addLogoRecognitionAnnotationsBuilder(int index) { + return getLogoRecognitionAnnotationsFieldBuilder() .addBuilder( index, - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.getDefaultInstance()); + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.getDefaultInstance()); } /** * * *
-     * Annotations for list of objects detected and tracked in video.
+     * Annotations for list of logos detected, tracked and recognized in video.
      * 
* * - * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14; + * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; * */ - public java.util.List - getObjectAnnotationsBuilderList() { - return getObjectAnnotationsFieldBuilder().getBuilderList(); + public java.util.List + getLogoRecognitionAnnotationsBuilderList() { + return getLogoRecognitionAnnotationsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation, - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder, - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder> - getObjectAnnotationsFieldBuilder() { - if (objectAnnotationsBuilder_ == null) { - objectAnnotationsBuilder_ = + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation, + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder, + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder> + getLogoRecognitionAnnotationsFieldBuilder() { + if (logoRecognitionAnnotationsBuilder_ == null) { + logoRecognitionAnnotationsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation, - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder, - com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder>( - objectAnnotations_, - ((bitField0_ & 0x00000200) != 0), + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation, + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder, + com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder>( + logoRecognitionAnnotations_, + ((bitField0_ & 0x00000800) != 0), getParentForChildren(), isClean()); - objectAnnotations_ = null; + logoRecognitionAnnotations_ = null; } - return objectAnnotationsBuilder_; + return 
logoRecognitionAnnotationsBuilder_; } - private java.util.List - logoRecognitionAnnotations_ = java.util.Collections.emptyList(); + private java.util.List + personDetectionAnnotations_ = java.util.Collections.emptyList(); - private void ensureLogoRecognitionAnnotationsIsMutable() { - if (!((bitField0_ & 0x00000400) != 0)) { - logoRecognitionAnnotations_ = + private void ensurePersonDetectionAnnotationsIsMutable() { + if (!((bitField0_ & 0x00001000) != 0)) { + personDetectionAnnotations_ = new java.util.ArrayList< - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation>( - logoRecognitionAnnotations_); - bitField0_ |= 0x00000400; + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation>( + personDetectionAnnotations_); + bitField0_ |= 0x00001000; } } private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation, - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder, - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder> - logoRecognitionAnnotationsBuilder_; + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation, + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder, + com.google.cloud.videointelligence.v1.PersonDetectionAnnotationOrBuilder> + personDetectionAnnotationsBuilder_; /** * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public java.util.List - getLogoRecognitionAnnotationsList() { - if (logoRecognitionAnnotationsBuilder_ == null) { - return java.util.Collections.unmodifiableList(logoRecognitionAnnotations_); + public java.util.List + getPersonDetectionAnnotationsList() { + if (personDetectionAnnotationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(personDetectionAnnotations_); } else { - return logoRecognitionAnnotationsBuilder_.getMessageList(); + return personDetectionAnnotationsBuilder_.getMessageList(); } } /** * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public int getLogoRecognitionAnnotationsCount() { - if (logoRecognitionAnnotationsBuilder_ == null) { - return logoRecognitionAnnotations_.size(); + public int getPersonDetectionAnnotationsCount() { + if (personDetectionAnnotationsBuilder_ == null) { + return personDetectionAnnotations_.size(); } else { - return logoRecognitionAnnotationsBuilder_.getCount(); + return personDetectionAnnotationsBuilder_.getCount(); } } /** * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation - getLogoRecognitionAnnotations(int index) { - if (logoRecognitionAnnotationsBuilder_ == null) { - return logoRecognitionAnnotations_.get(index); + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotation + getPersonDetectionAnnotations(int index) { + if (personDetectionAnnotationsBuilder_ == null) { + return personDetectionAnnotations_.get(index); } else { - return logoRecognitionAnnotationsBuilder_.getMessage(index); + return personDetectionAnnotationsBuilder_.getMessage(index); } } /** * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public Builder setLogoRecognitionAnnotations( - int index, com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation value) { - if (logoRecognitionAnnotationsBuilder_ == null) { + public Builder setPersonDetectionAnnotations( + int index, com.google.cloud.videointelligence.v1.PersonDetectionAnnotation value) { + if (personDetectionAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureLogoRecognitionAnnotationsIsMutable(); - logoRecognitionAnnotations_.set(index, value); + ensurePersonDetectionAnnotationsIsMutable(); + personDetectionAnnotations_.set(index, value); onChanged(); } else { - logoRecognitionAnnotationsBuilder_.setMessage(index, value); + personDetectionAnnotationsBuilder_.setMessage(index, value); } return this; } @@ -7291,22 +8498,22 @@ public Builder setLogoRecognitionAnnotations( * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public Builder setLogoRecognitionAnnotations( + public Builder setPersonDetectionAnnotations( int index, - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder builderForValue) { - if (logoRecognitionAnnotationsBuilder_ == null) { - ensureLogoRecognitionAnnotationsIsMutable(); - logoRecognitionAnnotations_.set(index, builderForValue.build()); + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder builderForValue) { + if (personDetectionAnnotationsBuilder_ == null) { + ensurePersonDetectionAnnotationsIsMutable(); + personDetectionAnnotations_.set(index, builderForValue.build()); onChanged(); } else { - logoRecognitionAnnotationsBuilder_.setMessage(index, builderForValue.build()); + personDetectionAnnotationsBuilder_.setMessage(index, builderForValue.build()); } return this; } @@ -7314,24 +8521,24 @@ public Builder setLogoRecognitionAnnotations( * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public Builder addLogoRecognitionAnnotations( - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation value) { - if (logoRecognitionAnnotationsBuilder_ == null) { + public Builder addPersonDetectionAnnotations( + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation value) { + if (personDetectionAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureLogoRecognitionAnnotationsIsMutable(); - logoRecognitionAnnotations_.add(value); + ensurePersonDetectionAnnotationsIsMutable(); + personDetectionAnnotations_.add(value); onChanged(); } else { - logoRecognitionAnnotationsBuilder_.addMessage(value); + personDetectionAnnotationsBuilder_.addMessage(value); } return this; } @@ -7339,24 +8546,24 @@ public Builder addLogoRecognitionAnnotations( * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public Builder addLogoRecognitionAnnotations( - int index, com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation value) { - if (logoRecognitionAnnotationsBuilder_ == null) { + public Builder addPersonDetectionAnnotations( + int index, com.google.cloud.videointelligence.v1.PersonDetectionAnnotation value) { + if (personDetectionAnnotationsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureLogoRecognitionAnnotationsIsMutable(); - logoRecognitionAnnotations_.add(index, value); + ensurePersonDetectionAnnotationsIsMutable(); + personDetectionAnnotations_.add(index, value); onChanged(); } else { - logoRecognitionAnnotationsBuilder_.addMessage(index, value); + personDetectionAnnotationsBuilder_.addMessage(index, value); } return this; } @@ -7364,21 +8571,21 @@ public Builder addLogoRecognitionAnnotations( * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public Builder addLogoRecognitionAnnotations( - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder builderForValue) { - if (logoRecognitionAnnotationsBuilder_ == null) { - ensureLogoRecognitionAnnotationsIsMutable(); - logoRecognitionAnnotations_.add(builderForValue.build()); + public Builder addPersonDetectionAnnotations( + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder builderForValue) { + if (personDetectionAnnotationsBuilder_ == null) { + ensurePersonDetectionAnnotationsIsMutable(); + personDetectionAnnotations_.add(builderForValue.build()); onChanged(); } else { - logoRecognitionAnnotationsBuilder_.addMessage(builderForValue.build()); + personDetectionAnnotationsBuilder_.addMessage(builderForValue.build()); } return this; } @@ -7386,22 +8593,22 @@ public Builder addLogoRecognitionAnnotations( * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public Builder addLogoRecognitionAnnotations( + public Builder addPersonDetectionAnnotations( int index, - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder builderForValue) { - if (logoRecognitionAnnotationsBuilder_ == null) { - ensureLogoRecognitionAnnotationsIsMutable(); - logoRecognitionAnnotations_.add(index, builderForValue.build()); + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder builderForValue) { + if (personDetectionAnnotationsBuilder_ == null) { + ensurePersonDetectionAnnotationsIsMutable(); + personDetectionAnnotations_.add(index, builderForValue.build()); onChanged(); } else { - logoRecognitionAnnotationsBuilder_.addMessage(index, builderForValue.build()); + personDetectionAnnotationsBuilder_.addMessage(index, builderForValue.build()); } return this; } @@ -7409,23 +8616,23 @@ public Builder addLogoRecognitionAnnotations( * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public Builder addAllLogoRecognitionAnnotations( + public Builder addAllPersonDetectionAnnotations( java.lang.Iterable< - ? extends com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation> + ? extends com.google.cloud.videointelligence.v1.PersonDetectionAnnotation> values) { - if (logoRecognitionAnnotationsBuilder_ == null) { - ensureLogoRecognitionAnnotationsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, logoRecognitionAnnotations_); + if (personDetectionAnnotationsBuilder_ == null) { + ensurePersonDetectionAnnotationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, personDetectionAnnotations_); onChanged(); } else { - logoRecognitionAnnotationsBuilder_.addAllMessages(values); + personDetectionAnnotationsBuilder_.addAllMessages(values); } return this; } @@ -7433,20 +8640,20 @@ public Builder addAllLogoRecognitionAnnotations( * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public Builder clearLogoRecognitionAnnotations() { - if (logoRecognitionAnnotationsBuilder_ == null) { - logoRecognitionAnnotations_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000400); + public Builder clearPersonDetectionAnnotations() { + if (personDetectionAnnotationsBuilder_ == null) { + personDetectionAnnotations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00001000); onChanged(); } else { - logoRecognitionAnnotationsBuilder_.clear(); + personDetectionAnnotationsBuilder_.clear(); } return this; } @@ -7454,20 +8661,20 @@ public Builder clearLogoRecognitionAnnotations() { * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public Builder removeLogoRecognitionAnnotations(int index) { - if (logoRecognitionAnnotationsBuilder_ == null) { - ensureLogoRecognitionAnnotationsIsMutable(); - logoRecognitionAnnotations_.remove(index); + public Builder removePersonDetectionAnnotations(int index) { + if (personDetectionAnnotationsBuilder_ == null) { + ensurePersonDetectionAnnotationsIsMutable(); + personDetectionAnnotations_.remove(index); onChanged(); } else { - logoRecognitionAnnotationsBuilder_.remove(index); + personDetectionAnnotationsBuilder_.remove(index); } return this; } @@ -7475,125 +8682,125 @@ public Builder removeLogoRecognitionAnnotations(int index) { * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder - getLogoRecognitionAnnotationsBuilder(int index) { - return getLogoRecognitionAnnotationsFieldBuilder().getBuilder(index); + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder + getPersonDetectionAnnotationsBuilder(int index) { + return getPersonDetectionAnnotationsFieldBuilder().getBuilder(index); } /** * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder - getLogoRecognitionAnnotationsOrBuilder(int index) { - if (logoRecognitionAnnotationsBuilder_ == null) { - return logoRecognitionAnnotations_.get(index); + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotationOrBuilder + getPersonDetectionAnnotationsOrBuilder(int index) { + if (personDetectionAnnotationsBuilder_ == null) { + return personDetectionAnnotations_.get(index); } else { - return logoRecognitionAnnotationsBuilder_.getMessageOrBuilder(index); + return personDetectionAnnotationsBuilder_.getMessageOrBuilder(index); } } /** * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ public java.util.List< - ? extends com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder> - getLogoRecognitionAnnotationsOrBuilderList() { - if (logoRecognitionAnnotationsBuilder_ != null) { - return logoRecognitionAnnotationsBuilder_.getMessageOrBuilderList(); + ? extends com.google.cloud.videointelligence.v1.PersonDetectionAnnotationOrBuilder> + getPersonDetectionAnnotationsOrBuilderList() { + if (personDetectionAnnotationsBuilder_ != null) { + return personDetectionAnnotationsBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(logoRecognitionAnnotations_); + return java.util.Collections.unmodifiableList(personDetectionAnnotations_); } } /** * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder - addLogoRecognitionAnnotationsBuilder() { - return getLogoRecognitionAnnotationsFieldBuilder() + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder + addPersonDetectionAnnotationsBuilder() { + return getPersonDetectionAnnotationsFieldBuilder() .addBuilder( - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.getDefaultInstance()); + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.getDefaultInstance()); } /** * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder - addLogoRecognitionAnnotationsBuilder(int index) { - return getLogoRecognitionAnnotationsFieldBuilder() + public com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder + addPersonDetectionAnnotationsBuilder(int index) { + return getPersonDetectionAnnotationsFieldBuilder() .addBuilder( index, - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.getDefaultInstance()); + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.getDefaultInstance()); } /** * * *
-     * Annotations for list of logos detected, tracked and recognized in video.
+     * Person detection annotations.
      * 
* * - * repeated .google.cloud.videointelligence.v1.LogoRecognitionAnnotation logo_recognition_annotations = 19; + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; * */ - public java.util.List - getLogoRecognitionAnnotationsBuilderList() { - return getLogoRecognitionAnnotationsFieldBuilder().getBuilderList(); + public java.util.List + getPersonDetectionAnnotationsBuilderList() { + return getPersonDetectionAnnotationsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation, - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder, - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder> - getLogoRecognitionAnnotationsFieldBuilder() { - if (logoRecognitionAnnotationsBuilder_ == null) { - logoRecognitionAnnotationsBuilder_ = + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation, + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder, + com.google.cloud.videointelligence.v1.PersonDetectionAnnotationOrBuilder> + getPersonDetectionAnnotationsFieldBuilder() { + if (personDetectionAnnotationsBuilder_ == null) { + personDetectionAnnotationsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation, - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation.Builder, - com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder>( - logoRecognitionAnnotations_, - ((bitField0_ & 0x00000400) != 0), + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation, + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation.Builder, + com.google.cloud.videointelligence.v1.PersonDetectionAnnotationOrBuilder>( + personDetectionAnnotations_, + ((bitField0_ & 0x00001000) != 0), getParentForChildren(), isClean()); - logoRecognitionAnnotations_ = null; + 
personDetectionAnnotations_ = null; } - return logoRecognitionAnnotationsBuilder_; + return personDetectionAnnotationsBuilder_; } private com.google.rpc.Status error_; diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResultsOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResultsOrBuilder.java index 7f1fc9d1e..c6e6456a2 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResultsOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResultsOrBuilder.java @@ -28,7 +28,7 @@ public interface VideoAnnotationResultsOrBuilder * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -41,7 +41,7 @@ public interface VideoAnnotationResultsOrBuilder * *
    * Video file location in
-   * [Google Cloud Storage](https://cloud.google.com/storage/).
+   * [Cloud Storage](https://cloud.google.com/storage/).
    * 
* * string input_uri = 1; @@ -89,7 +89,7 @@ public interface VideoAnnotationResultsOrBuilder * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -103,7 +103,7 @@ public interface VideoAnnotationResultsOrBuilder * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -116,7 +116,7 @@ public interface VideoAnnotationResultsOrBuilder * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -129,7 +129,7 @@ public interface VideoAnnotationResultsOrBuilder * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -143,7 +143,7 @@ public interface VideoAnnotationResultsOrBuilder * * *
-   * Topical label annotations on video level or user specified segment level.
+   * Topical label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label.
    * 
* @@ -158,7 +158,7 @@ public interface VideoAnnotationResultsOrBuilder * * *
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -176,7 +176,7 @@ public interface VideoAnnotationResultsOrBuilder
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -194,7 +194,7 @@ com.google.cloud.videointelligence.v1.LabelAnnotation getSegmentPresenceLabelAnn
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -211,7 +211,7 @@ com.google.cloud.videointelligence.v1.LabelAnnotation getSegmentPresenceLabelAnn
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -229,7 +229,7 @@ com.google.cloud.videointelligence.v1.LabelAnnotation getSegmentPresenceLabelAnn
    *
    *
    * 
-   * Presence label annotations on video level or user specified segment level.
+   * Presence label annotations on video level or user-specified segment level.
    * There is exactly one element for each unique label. Compared to the
    * existing topical `segment_label_annotations`, this field presents more
    * fine-grained, segment-level labels detected in video content and is made
@@ -460,55 +460,135 @@ com.google.cloud.videointelligence.v1.LabelAnnotationOrBuilder getFrameLabelAnno
    *
    *
    * 
-   * Face annotations. There is exactly one element for each unique face.
+   * Deprecated. Please use `face_detection_annotations` instead.
    * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated java.util.List getFaceAnnotationsList(); /** * * *
-   * Face annotations. There is exactly one element for each unique face.
+   * Deprecated. Please use `face_detection_annotations` instead.
    * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated com.google.cloud.videointelligence.v1.FaceAnnotation getFaceAnnotations(int index); /** * * *
-   * Face annotations. There is exactly one element for each unique face.
+   * Deprecated. Please use `face_detection_annotations` instead.
    * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated int getFaceAnnotationsCount(); /** * * *
-   * Face annotations. There is exactly one element for each unique face.
+   * Deprecated. Please use `face_detection_annotations` instead.
    * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated java.util.List getFaceAnnotationsOrBuilderList(); /** * * *
-   * Face annotations. There is exactly one element for each unique face.
+   * Deprecated. Please use `face_detection_annotations` instead.
    * 
* - * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5; + * + * repeated .google.cloud.videointelligence.v1.FaceAnnotation face_annotations = 5 [deprecated = true]; + * */ + @java.lang.Deprecated com.google.cloud.videointelligence.v1.FaceAnnotationOrBuilder getFaceAnnotationsOrBuilder( int index); + /** + * + * + *
+   * Face detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * + */ + java.util.List + getFaceDetectionAnnotationsList(); + /** + * + * + *
+   * Face detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * + */ + com.google.cloud.videointelligence.v1.FaceDetectionAnnotation getFaceDetectionAnnotations( + int index); + /** + * + * + *
+   * Face detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * + */ + int getFaceDetectionAnnotationsCount(); + /** + * + * + *
+   * Face detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * + */ + java.util.List + getFaceDetectionAnnotationsOrBuilderList(); + /** + * + * + *
+   * Face detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.FaceDetectionAnnotation face_detection_annotations = 13; + * + */ + com.google.cloud.videointelligence.v1.FaceDetectionAnnotationOrBuilder + getFaceDetectionAnnotationsOrBuilder(int index); + /** * * @@ -857,6 +937,71 @@ com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation getLogoRecogniti com.google.cloud.videointelligence.v1.LogoRecognitionAnnotationOrBuilder getLogoRecognitionAnnotationsOrBuilder(int index); + /** + * + * + *
+   * Person detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; + * + */ + java.util.List + getPersonDetectionAnnotationsList(); + /** + * + * + *
+   * Person detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; + * + */ + com.google.cloud.videointelligence.v1.PersonDetectionAnnotation getPersonDetectionAnnotations( + int index); + /** + * + * + *
+   * Person detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; + * + */ + int getPersonDetectionAnnotationsCount(); + /** + * + * + *
+   * Person detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; + * + */ + java.util.List + getPersonDetectionAnnotationsOrBuilderList(); + /** + * + * + *
+   * Person detection annotations.
+   * 
+ * + * + * repeated .google.cloud.videointelligence.v1.PersonDetectionAnnotation person_detection_annotations = 20; + * + */ + com.google.cloud.videointelligence.v1.PersonDetectionAnnotationOrBuilder + getPersonDetectionAnnotationsOrBuilder(int index); + /** * * diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContext.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContext.java index 955052518..758c73a4f 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContext.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContext.java @@ -187,6 +187,23 @@ private VideoContext( textDetectionConfig_ = subBuilder.buildPartial(); } + break; + } + case 90: + { + com.google.cloud.videointelligence.v1.PersonDetectionConfig.Builder subBuilder = null; + if (personDetectionConfig_ != null) { + subBuilder = personDetectionConfig_.toBuilder(); + } + personDetectionConfig_ = + input.readMessage( + com.google.cloud.videointelligence.v1.PersonDetectionConfig.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(personDetectionConfig_); + personDetectionConfig_ = subBuilder.buildPartial(); + } + break; } case 106: @@ -644,6 +661,58 @@ public com.google.cloud.videointelligence.v1.TextDetectionConfig getTextDetectio return getTextDetectionConfig(); } + public static final int PERSON_DETECTION_CONFIG_FIELD_NUMBER = 11; + private com.google.cloud.videointelligence.v1.PersonDetectionConfig personDetectionConfig_; + /** + * + * + *
+   * Config for PERSON_DETECTION.
+   * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + * + * @return Whether the personDetectionConfig field is set. + */ + @java.lang.Override + public boolean hasPersonDetectionConfig() { + return personDetectionConfig_ != null; + } + /** + * + * + *
+   * Config for PERSON_DETECTION.
+   * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + * + * @return The personDetectionConfig. + */ + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionConfig getPersonDetectionConfig() { + return personDetectionConfig_ == null + ? com.google.cloud.videointelligence.v1.PersonDetectionConfig.getDefaultInstance() + : personDetectionConfig_; + } + /** + * + * + *
+   * Config for PERSON_DETECTION.
+   * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + */ + @java.lang.Override + public com.google.cloud.videointelligence.v1.PersonDetectionConfigOrBuilder + getPersonDetectionConfigOrBuilder() { + return getPersonDetectionConfig(); + } + public static final int OBJECT_TRACKING_CONFIG_FIELD_NUMBER = 13; private com.google.cloud.videointelligence.v1.ObjectTrackingConfig objectTrackingConfig_; /** @@ -731,6 +800,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (textDetectionConfig_ != null) { output.writeMessage(8, getTextDetectionConfig()); } + if (personDetectionConfig_ != null) { + output.writeMessage(11, getPersonDetectionConfig()); + } if (objectTrackingConfig_ != null) { output.writeMessage(13, getObjectTrackingConfig()); } @@ -771,6 +843,10 @@ public int getSerializedSize() { if (textDetectionConfig_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getTextDetectionConfig()); } + if (personDetectionConfig_ != null) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(11, getPersonDetectionConfig()); + } if (objectTrackingConfig_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(13, getObjectTrackingConfig()); @@ -820,6 +896,10 @@ public boolean equals(final java.lang.Object obj) { if (hasTextDetectionConfig()) { if (!getTextDetectionConfig().equals(other.getTextDetectionConfig())) return false; } + if (hasPersonDetectionConfig() != other.hasPersonDetectionConfig()) return false; + if (hasPersonDetectionConfig()) { + if (!getPersonDetectionConfig().equals(other.getPersonDetectionConfig())) return false; + } if (hasObjectTrackingConfig() != other.hasObjectTrackingConfig()) return false; if (hasObjectTrackingConfig()) { if (!getObjectTrackingConfig().equals(other.getObjectTrackingConfig())) return false; @@ -863,6 +943,10 @@ public int hashCode() { hash = (37 * hash) + TEXT_DETECTION_CONFIG_FIELD_NUMBER; 
hash = (53 * hash) + getTextDetectionConfig().hashCode(); } + if (hasPersonDetectionConfig()) { + hash = (37 * hash) + PERSON_DETECTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getPersonDetectionConfig().hashCode(); + } if (hasObjectTrackingConfig()) { hash = (37 * hash) + OBJECT_TRACKING_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getObjectTrackingConfig().hashCode(); @@ -1056,6 +1140,12 @@ public Builder clear() { textDetectionConfig_ = null; textDetectionConfigBuilder_ = null; } + if (personDetectionConfigBuilder_ == null) { + personDetectionConfig_ = null; + } else { + personDetectionConfig_ = null; + personDetectionConfigBuilder_ = null; + } if (objectTrackingConfigBuilder_ == null) { objectTrackingConfig_ = null; } else { @@ -1129,6 +1219,11 @@ public com.google.cloud.videointelligence.v1.VideoContext buildPartial() { } else { result.textDetectionConfig_ = textDetectionConfigBuilder_.build(); } + if (personDetectionConfigBuilder_ == null) { + result.personDetectionConfig_ = personDetectionConfig_; + } else { + result.personDetectionConfig_ = personDetectionConfigBuilder_.build(); + } if (objectTrackingConfigBuilder_ == null) { result.objectTrackingConfig_ = objectTrackingConfig_; } else { @@ -1229,6 +1324,9 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoContext othe if (other.hasTextDetectionConfig()) { mergeTextDetectionConfig(other.getTextDetectionConfig()); } + if (other.hasPersonDetectionConfig()) { + mergePersonDetectionConfig(other.getPersonDetectionConfig()); + } if (other.hasObjectTrackingConfig()) { mergeObjectTrackingConfig(other.getObjectTrackingConfig()); } @@ -2894,6 +2992,206 @@ public Builder clearTextDetectionConfig() { return textDetectionConfigBuilder_; } + private com.google.cloud.videointelligence.v1.PersonDetectionConfig personDetectionConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1.PersonDetectionConfig, + 
com.google.cloud.videointelligence.v1.PersonDetectionConfig.Builder, + com.google.cloud.videointelligence.v1.PersonDetectionConfigOrBuilder> + personDetectionConfigBuilder_; + /** + * + * + *
+     * Config for PERSON_DETECTION.
+     * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + * + * @return Whether the personDetectionConfig field is set. + */ + public boolean hasPersonDetectionConfig() { + return personDetectionConfigBuilder_ != null || personDetectionConfig_ != null; + } + /** + * + * + *
+     * Config for PERSON_DETECTION.
+     * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + * + * @return The personDetectionConfig. + */ + public com.google.cloud.videointelligence.v1.PersonDetectionConfig getPersonDetectionConfig() { + if (personDetectionConfigBuilder_ == null) { + return personDetectionConfig_ == null + ? com.google.cloud.videointelligence.v1.PersonDetectionConfig.getDefaultInstance() + : personDetectionConfig_; + } else { + return personDetectionConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Config for PERSON_DETECTION.
+     * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + */ + public Builder setPersonDetectionConfig( + com.google.cloud.videointelligence.v1.PersonDetectionConfig value) { + if (personDetectionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + personDetectionConfig_ = value; + onChanged(); + } else { + personDetectionConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Config for PERSON_DETECTION.
+     * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + */ + public Builder setPersonDetectionConfig( + com.google.cloud.videointelligence.v1.PersonDetectionConfig.Builder builderForValue) { + if (personDetectionConfigBuilder_ == null) { + personDetectionConfig_ = builderForValue.build(); + onChanged(); + } else { + personDetectionConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Config for PERSON_DETECTION.
+     * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + */ + public Builder mergePersonDetectionConfig( + com.google.cloud.videointelligence.v1.PersonDetectionConfig value) { + if (personDetectionConfigBuilder_ == null) { + if (personDetectionConfig_ != null) { + personDetectionConfig_ = + com.google.cloud.videointelligence.v1.PersonDetectionConfig.newBuilder( + personDetectionConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + personDetectionConfig_ = value; + } + onChanged(); + } else { + personDetectionConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Config for PERSON_DETECTION.
+     * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + */ + public Builder clearPersonDetectionConfig() { + if (personDetectionConfigBuilder_ == null) { + personDetectionConfig_ = null; + onChanged(); + } else { + personDetectionConfig_ = null; + personDetectionConfigBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Config for PERSON_DETECTION.
+     * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + */ + public com.google.cloud.videointelligence.v1.PersonDetectionConfig.Builder + getPersonDetectionConfigBuilder() { + + onChanged(); + return getPersonDetectionConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Config for PERSON_DETECTION.
+     * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + */ + public com.google.cloud.videointelligence.v1.PersonDetectionConfigOrBuilder + getPersonDetectionConfigOrBuilder() { + if (personDetectionConfigBuilder_ != null) { + return personDetectionConfigBuilder_.getMessageOrBuilder(); + } else { + return personDetectionConfig_ == null + ? com.google.cloud.videointelligence.v1.PersonDetectionConfig.getDefaultInstance() + : personDetectionConfig_; + } + } + /** + * + * + *
+     * Config for PERSON_DETECTION.
+     * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1.PersonDetectionConfig, + com.google.cloud.videointelligence.v1.PersonDetectionConfig.Builder, + com.google.cloud.videointelligence.v1.PersonDetectionConfigOrBuilder> + getPersonDetectionConfigFieldBuilder() { + if (personDetectionConfigBuilder_ == null) { + personDetectionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.videointelligence.v1.PersonDetectionConfig, + com.google.cloud.videointelligence.v1.PersonDetectionConfig.Builder, + com.google.cloud.videointelligence.v1.PersonDetectionConfigOrBuilder>( + getPersonDetectionConfig(), getParentForChildren(), isClean()); + personDetectionConfig_ = null; + } + return personDetectionConfigBuilder_; + } + private com.google.cloud.videointelligence.v1.ObjectTrackingConfig objectTrackingConfig_; private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.videointelligence.v1.ObjectTrackingConfig, diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContextOrBuilder.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContextOrBuilder.java index c447231b3..7810b1e07 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContextOrBuilder.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContextOrBuilder.java @@ -323,6 +323,45 @@ public interface VideoContextOrBuilder com.google.cloud.videointelligence.v1.TextDetectionConfigOrBuilder getTextDetectionConfigOrBuilder(); + /** + * + * + *
+   * Config for PERSON_DETECTION.
+   * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + * + * @return Whether the personDetectionConfig field is set. + */ + boolean hasPersonDetectionConfig(); + /** + * + * + *
+   * Config for PERSON_DETECTION.
+   * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + * + * @return The personDetectionConfig. + */ + com.google.cloud.videointelligence.v1.PersonDetectionConfig getPersonDetectionConfig(); + /** + * + * + *
+   * Config for PERSON_DETECTION.
+   * 
+ * + * .google.cloud.videointelligence.v1.PersonDetectionConfig person_detection_config = 11; + * + */ + com.google.cloud.videointelligence.v1.PersonDetectionConfigOrBuilder + getPersonDetectionConfigOrBuilder(); + /** * * diff --git a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceProto.java b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceProto.java index c19f00d93..31101fe05 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceProto.java +++ b/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceProto.java @@ -51,6 +51,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_videointelligence_v1_FaceDetectionConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_videointelligence_v1_FaceDetectionConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_videointelligence_v1_PersonDetectionConfig_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_videointelligence_v1_PersonDetectionConfig_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_videointelligence_v1_ExplicitContentDetectionConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -91,6 +95,14 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_videointelligence_v1_NormalizedBoundingBox_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable 
internal_static_google_cloud_videointelligence_v1_NormalizedBoundingBox_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_videointelligence_v1_FaceDetectionAnnotation_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_videointelligence_v1_FaceDetectionAnnotation_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_videointelligence_v1_PersonDetectionAnnotation_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_videointelligence_v1_PersonDetectionAnnotation_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_videointelligence_v1_FaceSegment_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -210,7 +222,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\003\340A\002\022F\n\rvideo_context\030\003 \001(\0132/.google.clo" + "ud.videointelligence.v1.VideoContext\022\027\n\n" + "output_uri\030\004 \001(\tB\003\340A\001\022\030\n\013location_id\030\005 \001" - + "(\tB\003\340A\001\"\346\005\n\014VideoContext\022A\n\010segments\030\001 \003" + + "(\tB\003\340A\001\"\301\006\n\014VideoContext\022A\n\010segments\030\001 \003" + "(\0132/.google.cloud.videointelligence.v1.V" + "ideoSegment\022W\n\026label_detection_config\030\002 " + "\001(\01327.google.cloud.videointelligence.v1." 
@@ -226,197 +238,214 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "gle.cloud.videointelligence.v1.SpeechTra" + "nscriptionConfig\022U\n\025text_detection_confi" + "g\030\010 \001(\01326.google.cloud.videointelligence" - + ".v1.TextDetectionConfig\022W\n\026object_tracki" - + "ng_config\030\r \001(\01327.google.cloud.videointe" - + "lligence.v1.ObjectTrackingConfig\"\335\001\n\024Lab" - + "elDetectionConfig\022S\n\024label_detection_mod" - + "e\030\001 \001(\01625.google.cloud.videointelligence" - + ".v1.LabelDetectionMode\022\031\n\021stationary_cam" - + "era\030\002 \001(\010\022\r\n\005model\030\003 \001(\t\022\"\n\032frame_confid" - + "ence_threshold\030\004 \001(\002\022\"\n\032video_confidence" - + "_threshold\030\005 \001(\002\"*\n\031ShotChangeDetectionC" - + "onfig\022\r\n\005model\030\001 \001(\t\"%\n\024ObjectTrackingCo" - + "nfig\022\r\n\005model\030\001 \001(\t\"D\n\023FaceDetectionConf" - + "ig\022\r\n\005model\030\001 \001(\t\022\036\n\026include_bounding_bo" - + "xes\030\002 \001(\010\"/\n\036ExplicitContentDetectionCon" - + "fig\022\r\n\005model\030\001 \001(\t\"<\n\023TextDetectionConfi" - + "g\022\026\n\016language_hints\030\001 \003(\t\022\r\n\005model\030\002 \001(\t" - + "\"x\n\014VideoSegment\0224\n\021start_time_offset\030\001 " - + "\001(\0132\031.google.protobuf.Duration\0222\n\017end_ti" - + "me_offset\030\002 \001(\0132\031.google.protobuf.Durati" - + "on\"d\n\014LabelSegment\022@\n\007segment\030\001 \001(\0132/.go" + + ".v1.TextDetectionConfig\022Y\n\027person_detect" + + "ion_config\030\013 \001(\01328.google.cloud.videoint" + + "elligence.v1.PersonDetectionConfig\022W\n\026ob" + + "ject_tracking_config\030\r \001(\01327.google.clou" + + "d.videointelligence.v1.ObjectTrackingCon" + + "fig\"\335\001\n\024LabelDetectionConfig\022S\n\024label_de" + + "tection_mode\030\001 \001(\01625.google.cloud.videoi" + + "ntelligence.v1.LabelDetectionMode\022\031\n\021sta" + + "tionary_camera\030\002 
\001(\010\022\r\n\005model\030\003 \001(\t\022\"\n\032f" + + "rame_confidence_threshold\030\004 \001(\002\022\"\n\032video" + + "_confidence_threshold\030\005 \001(\002\"*\n\031ShotChang" + + "eDetectionConfig\022\r\n\005model\030\001 \001(\t\"%\n\024Objec" + + "tTrackingConfig\022\r\n\005model\030\001 \001(\t\"`\n\023FaceDe" + + "tectionConfig\022\r\n\005model\030\001 \001(\t\022\036\n\026include_" + + "bounding_boxes\030\002 \001(\010\022\032\n\022include_attribut" + + "es\030\005 \001(\010\"s\n\025PersonDetectionConfig\022\036\n\026inc" + + "lude_bounding_boxes\030\001 \001(\010\022\036\n\026include_pos" + + "e_landmarks\030\002 \001(\010\022\032\n\022include_attributes\030" + + "\003 \001(\010\"/\n\036ExplicitContentDetectionConfig\022" + + "\r\n\005model\030\001 \001(\t\"<\n\023TextDetectionConfig\022\026\n" + + "\016language_hints\030\001 \003(\t\022\r\n\005model\030\002 \001(\t\"x\n\014" + + "VideoSegment\0224\n\021start_time_offset\030\001 \001(\0132" + + "\031.google.protobuf.Duration\0222\n\017end_time_o" + + "ffset\030\002 \001(\0132\031.google.protobuf.Duration\"d" + + "\n\014LabelSegment\022@\n\007segment\030\001 \001(\0132/.google" + + ".cloud.videointelligence.v1.VideoSegment" + + "\022\022\n\nconfidence\030\002 \001(\002\"P\n\nLabelFrame\022.\n\013ti" + + "me_offset\030\001 \001(\0132\031.google.protobuf.Durati" + + "on\022\022\n\nconfidence\030\002 \001(\002\"G\n\006Entity\022\021\n\tenti" + + "ty_id\030\001 \001(\t\022\023\n\013description\030\002 \001(\t\022\025\n\rlang" + + "uage_code\030\003 \001(\t\"\245\002\n\017LabelAnnotation\0229\n\006e" + + "ntity\030\001 \001(\0132).google.cloud.videointellig" + + "ence.v1.Entity\022D\n\021category_entities\030\002 \003(" + + "\0132).google.cloud.videointelligence.v1.En" + + "tity\022A\n\010segments\030\003 \003(\0132/.google.cloud.vi" + + "deointelligence.v1.LabelSegment\022=\n\006frame" + + "s\030\004 \003(\0132-.google.cloud.videointelligence" + + ".v1.LabelFrame\022\017\n\007version\030\005 
\001(\t\"\225\001\n\024Expl" + + "icitContentFrame\022.\n\013time_offset\030\001 \001(\0132\031." + + "google.protobuf.Duration\022M\n\026pornography_" + + "likelihood\030\002 \001(\0162-.google.cloud.videoint" + + "elligence.v1.Likelihood\"u\n\031ExplicitConte" + + "ntAnnotation\022G\n\006frames\030\001 \003(\01327.google.cl" + + "oud.videointelligence.v1.ExplicitContent" + + "Frame\022\017\n\007version\030\002 \001(\t\"Q\n\025NormalizedBoun" + + "dingBox\022\014\n\004left\030\001 \001(\002\022\013\n\003top\030\002 \001(\002\022\r\n\005ri" + + "ght\030\003 \001(\002\022\016\n\006bottom\030\004 \001(\002\"*\n\027FaceDetecti" + + "onAnnotation\022\017\n\007version\030\005 \001(\t\"f\n\031PersonD" + + "etectionAnnotation\0228\n\006tracks\030\001 \003(\0132(.goo" + + "gle.cloud.videointelligence.v1.Track\022\017\n\007" + + "version\030\002 \001(\t\"O\n\013FaceSegment\022@\n\007segment\030" + + "\001 \001(\0132/.google.cloud.videointelligence.v" + + "1.VideoSegment\"\234\001\n\tFaceFrame\022[\n\031normaliz" + + "ed_bounding_boxes\030\001 \003(\01328.google.cloud.v" + + "ideointelligence.v1.NormalizedBoundingBo" + + "x\022.\n\013time_offset\030\002 \001(\0132\031.google.protobuf" + + ".Duration:\002\030\001\"\247\001\n\016FaceAnnotation\022\021\n\tthum" + + "bnail\030\001 \001(\014\022@\n\010segments\030\002 \003(\0132..google.c" + + "loud.videointelligence.v1.FaceSegment\022<\n" + + "\006frames\030\003 \003(\0132,.google.cloud.videointell" + + "igence.v1.FaceFrame:\002\030\001\"\272\002\n\021TimestampedO" + + "bject\022Y\n\027normalized_bounding_box\030\001 \001(\01328" + + ".google.cloud.videointelligence.v1.Norma" + + "lizedBoundingBox\022.\n\013time_offset\030\002 \001(\0132\031." 
+ + "google.protobuf.Duration\022M\n\nattributes\030\003" + + " \003(\01324.google.cloud.videointelligence.v1" + + ".DetectedAttributeB\003\340A\001\022K\n\tlandmarks\030\004 \003" + + "(\01323.google.cloud.videointelligence.v1.D" + + "etectedLandmarkB\003\340A\001\"\204\002\n\005Track\022@\n\007segmen" + + "t\030\001 \001(\0132/.google.cloud.videointelligence" + + ".v1.VideoSegment\022Q\n\023timestamped_objects\030" + + "\002 \003(\01324.google.cloud.videointelligence.v" + + "1.TimestampedObject\022M\n\nattributes\030\003 \003(\0132" + + "4.google.cloud.videointelligence.v1.Dete" + + "ctedAttributeB\003\340A\001\022\027\n\nconfidence\030\004 \001(\002B\003" + + "\340A\001\"D\n\021DetectedAttribute\022\014\n\004name\030\001 \001(\t\022\022" + + "\n\nconfidence\030\002 \001(\002\022\r\n\005value\030\003 \001(\t\"x\n\020Det" + + "ectedLandmark\022\014\n\004name\030\001 \001(\t\022B\n\005point\030\002 \001" + + "(\01323.google.cloud.videointelligence.v1.N" + + "ormalizedVertex\022\022\n\nconfidence\030\003 \001(\002\"\351\n\n\026" + + "VideoAnnotationResults\022\021\n\tinput_uri\030\001 \001(" + + "\t\022@\n\007segment\030\n \001(\0132/.google.cloud.videoi" + + "ntelligence.v1.VideoSegment\022U\n\031segment_l" + + "abel_annotations\030\002 \003(\01322.google.cloud.vi" + + "deointelligence.v1.LabelAnnotation\022^\n\"se" + + "gment_presence_label_annotations\030\027 \003(\01322" + + ".google.cloud.videointelligence.v1.Label" + + "Annotation\022R\n\026shot_label_annotations\030\003 \003" + + "(\01322.google.cloud.videointelligence.v1.L" + + "abelAnnotation\022[\n\037shot_presence_label_an" + + "notations\030\030 \003(\01322.google.cloud.videointe" + + "lligence.v1.LabelAnnotation\022S\n\027frame_lab" + + "el_annotations\030\004 \003(\01322.google.cloud.vide" + + "ointelligence.v1.LabelAnnotation\022O\n\020face" + + "_annotations\030\005 \003(\01321.google.cloud.videoi" + + "ntelligence.v1.FaceAnnotationB\002\030\001\022^\n\032fac" + + "e_detection_annotations\030\r 
\003(\0132:.google.c" + + "loud.videointelligence.v1.FaceDetectionA" + + "nnotation\022I\n\020shot_annotations\030\006 \003(\0132/.go" + "ogle.cloud.videointelligence.v1.VideoSeg" - + "ment\022\022\n\nconfidence\030\002 \001(\002\"P\n\nLabelFrame\022." - + "\n\013time_offset\030\001 \001(\0132\031.google.protobuf.Du" - + "ration\022\022\n\nconfidence\030\002 \001(\002\"G\n\006Entity\022\021\n\t" - + "entity_id\030\001 \001(\t\022\023\n\013description\030\002 \001(\t\022\025\n\r" - + "language_code\030\003 \001(\t\"\224\002\n\017LabelAnnotation\022" - + "9\n\006entity\030\001 \001(\0132).google.cloud.videointe" - + "lligence.v1.Entity\022D\n\021category_entities\030" - + "\002 \003(\0132).google.cloud.videointelligence.v" - + "1.Entity\022A\n\010segments\030\003 \003(\0132/.google.clou" - + "d.videointelligence.v1.LabelSegment\022=\n\006f" - + "rames\030\004 \003(\0132-.google.cloud.videointellig" - + "ence.v1.LabelFrame\"\225\001\n\024ExplicitContentFr" - + "ame\022.\n\013time_offset\030\001 \001(\0132\031.google.protob" - + "uf.Duration\022M\n\026pornography_likelihood\030\002 " - + "\001(\0162-.google.cloud.videointelligence.v1." 
- + "Likelihood\"d\n\031ExplicitContentAnnotation\022" - + "G\n\006frames\030\001 \003(\01327.google.cloud.videointe" - + "lligence.v1.ExplicitContentFrame\"Q\n\025Norm" - + "alizedBoundingBox\022\014\n\004left\030\001 \001(\002\022\013\n\003top\030\002" - + " \001(\002\022\r\n\005right\030\003 \001(\002\022\016\n\006bottom\030\004 \001(\002\"O\n\013F" - + "aceSegment\022@\n\007segment\030\001 \001(\0132/.google.clo" - + "ud.videointelligence.v1.VideoSegment\"\230\001\n" - + "\tFaceFrame\022[\n\031normalized_bounding_boxes\030" - + "\001 \003(\01328.google.cloud.videointelligence.v" - + "1.NormalizedBoundingBox\022.\n\013time_offset\030\002" - + " \001(\0132\031.google.protobuf.Duration\"\243\001\n\016Face" - + "Annotation\022\021\n\tthumbnail\030\001 \001(\014\022@\n\010segment" - + "s\030\002 \003(\0132..google.cloud.videointelligence" - + ".v1.FaceSegment\022<\n\006frames\030\003 \003(\0132,.google" - + ".cloud.videointelligence.v1.FaceFrame\"\272\002" - + "\n\021TimestampedObject\022Y\n\027normalized_boundi" - + "ng_box\030\001 \001(\01328.google.cloud.videointelli" - + "gence.v1.NormalizedBoundingBox\022.\n\013time_o" - + "ffset\030\002 \001(\0132\031.google.protobuf.Duration\022M" - + "\n\nattributes\030\003 \003(\01324.google.cloud.videoi" - + "ntelligence.v1.DetectedAttributeB\003\340A\001\022K\n" - + "\tlandmarks\030\004 \003(\01323.google.cloud.videoint" - + "elligence.v1.DetectedLandmarkB\003\340A\001\"\204\002\n\005T" - + "rack\022@\n\007segment\030\001 \001(\0132/.google.cloud.vid" - + "eointelligence.v1.VideoSegment\022Q\n\023timest" - + "amped_objects\030\002 \003(\01324.google.cloud.video" - + "intelligence.v1.TimestampedObject\022M\n\natt" - + "ributes\030\003 \003(\01324.google.cloud.videointell" - + "igence.v1.DetectedAttributeB\003\340A\001\022\027\n\nconf" - + "idence\030\004 \001(\002B\003\340A\001\"D\n\021DetectedAttribute\022\014" - + "\n\004name\030\001 \001(\t\022\022\n\nconfidence\030\002 \001(\002\022\r\n\005valu" - + "e\030\003 
\001(\t\"x\n\020DetectedLandmark\022\014\n\004name\030\001 \001(" - + "\t\022B\n\005point\030\002 \001(\01323.google.cloud.videoint" - + "elligence.v1.NormalizedVertex\022\022\n\nconfide" - + "nce\030\003 \001(\002\"\241\t\n\026VideoAnnotationResults\022\021\n\t" - + "input_uri\030\001 \001(\t\022@\n\007segment\030\n \001(\0132/.googl" - + "e.cloud.videointelligence.v1.VideoSegmen" - + "t\022U\n\031segment_label_annotations\030\002 \003(\01322.g" - + "oogle.cloud.videointelligence.v1.LabelAn" - + "notation\022^\n\"segment_presence_label_annot" - + "ations\030\027 \003(\01322.google.cloud.videointelli" - + "gence.v1.LabelAnnotation\022R\n\026shot_label_a" - + "nnotations\030\003 \003(\01322.google.cloud.videoint" - + "elligence.v1.LabelAnnotation\022[\n\037shot_pre" - + "sence_label_annotations\030\030 \003(\01322.google.c" - + "loud.videointelligence.v1.LabelAnnotatio" - + "n\022S\n\027frame_label_annotations\030\004 \003(\01322.goo" - + "gle.cloud.videointelligence.v1.LabelAnno" - + "tation\022K\n\020face_annotations\030\005 \003(\01321.googl" - + "e.cloud.videointelligence.v1.FaceAnnotat" - + "ion\022I\n\020shot_annotations\030\006 \003(\0132/.google.c" - + "loud.videointelligence.v1.VideoSegment\022Y" - + "\n\023explicit_annotation\030\007 \001(\0132<.google.clo" - + "ud.videointelligence.v1.ExplicitContentA" - + "nnotation\022U\n\025speech_transcriptions\030\013 \003(\013" - + "26.google.cloud.videointelligence.v1.Spe" - + "echTranscription\022K\n\020text_annotations\030\014 \003" - + "(\01321.google.cloud.videointelligence.v1.T" - + "extAnnotation\022W\n\022object_annotations\030\016 \003(" - + "\0132;.google.cloud.videointelligence.v1.Ob" - + "jectTrackingAnnotation\022b\n\034logo_recogniti" - + "on_annotations\030\023 \003(\0132<.google.cloud.vide" - + "ointelligence.v1.LogoRecognitionAnnotati" - + "on\022!\n\005error\030\t \001(\0132\022.google.rpc.Status\"n\n" - + "\025AnnotateVideoResponse\022U\n\022annotation_res" - + "ults\030\001 
\003(\01329.google.cloud.videointellige" - + "nce.v1.VideoAnnotationResults\"\246\002\n\027VideoA" - + "nnotationProgress\022\021\n\tinput_uri\030\001 \001(\t\022\030\n\020" - + "progress_percent\030\002 \001(\005\022.\n\nstart_time\030\003 \001" - + "(\0132\032.google.protobuf.Timestamp\022/\n\013update" - + "_time\030\004 \001(\0132\032.google.protobuf.Timestamp\022" - + ";\n\007feature\030\005 \001(\0162*.google.cloud.videoint" - + "elligence.v1.Feature\022@\n\007segment\030\006 \001(\0132/." + + "ment\022Y\n\023explicit_annotation\030\007 \001(\0132<.goog" + + "le.cloud.videointelligence.v1.ExplicitCo" + + "ntentAnnotation\022U\n\025speech_transcriptions" + + "\030\013 \003(\01326.google.cloud.videointelligence." + + "v1.SpeechTranscription\022K\n\020text_annotatio" + + "ns\030\014 \003(\01321.google.cloud.videointelligenc" + + "e.v1.TextAnnotation\022W\n\022object_annotation" + + "s\030\016 \003(\0132;.google.cloud.videointelligence" + + ".v1.ObjectTrackingAnnotation\022b\n\034logo_rec" + + "ognition_annotations\030\023 \003(\0132<.google.clou" + + "d.videointelligence.v1.LogoRecognitionAn" + + "notation\022b\n\034person_detection_annotations" + + "\030\024 \003(\0132<.google.cloud.videointelligence." + + "v1.PersonDetectionAnnotation\022!\n\005error\030\t " + + "\001(\0132\022.google.rpc.Status\"n\n\025AnnotateVideo" + + "Response\022U\n\022annotation_results\030\001 \003(\01329.g" + + "oogle.cloud.videointelligence.v1.VideoAn" + + "notationResults\"\246\002\n\027VideoAnnotationProgr" + + "ess\022\021\n\tinput_uri\030\001 \001(\t\022\030\n\020progress_perce" + + "nt\030\002 \001(\005\022.\n\nstart_time\030\003 \001(\0132\032.google.pr" + + "otobuf.Timestamp\022/\n\013update_time\030\004 \001(\0132\032." 
+ + "google.protobuf.Timestamp\022;\n\007feature\030\005 \001" + + "(\0162*.google.cloud.videointelligence.v1.F" + + "eature\022@\n\007segment\030\006 \001(\0132/.google.cloud.v" + + "ideointelligence.v1.VideoSegment\"p\n\025Anno" + + "tateVideoProgress\022W\n\023annotation_progress" + + "\030\001 \003(\0132:.google.cloud.videointelligence." + + "v1.VideoAnnotationProgress\"\201\003\n\031SpeechTra" + + "nscriptionConfig\022\032\n\rlanguage_code\030\001 \001(\tB" + + "\003\340A\002\022\035\n\020max_alternatives\030\002 \001(\005B\003\340A\001\022\035\n\020f" + + "ilter_profanity\030\003 \001(\010B\003\340A\001\022N\n\017speech_con" + + "texts\030\004 \003(\01320.google.cloud.videointellig" + + "ence.v1.SpeechContextB\003\340A\001\022)\n\034enable_aut" + + "omatic_punctuation\030\005 \001(\010B\003\340A\001\022\031\n\014audio_t" + + "racks\030\006 \003(\005B\003\340A\001\022\'\n\032enable_speaker_diari" + + "zation\030\007 \001(\010B\003\340A\001\022&\n\031diarization_speaker" + + "_count\030\010 \001(\005B\003\340A\001\022#\n\026enable_word_confide" + + "nce\030\t \001(\010B\003\340A\001\"%\n\rSpeechContext\022\024\n\007phras" + + "es\030\001 \003(\tB\003\340A\001\"\210\001\n\023SpeechTranscription\022U\n" + + "\014alternatives\030\001 \003(\0132?.google.cloud.video" + + "intelligence.v1.SpeechRecognitionAlterna" + + "tive\022\032\n\rlanguage_code\030\002 \001(\tB\003\340A\003\"\214\001\n\034Spe" + + "echRecognitionAlternative\022\022\n\ntranscript\030" + + "\001 \001(\t\022\027\n\nconfidence\030\002 \001(\002B\003\340A\003\022?\n\005words\030" + + "\003 \003(\0132+.google.cloud.videointelligence.v" + + "1.WordInfoB\003\340A\003\"\247\001\n\010WordInfo\022-\n\nstart_ti" + + "me\030\001 \001(\0132\031.google.protobuf.Duration\022+\n\010e" + + "nd_time\030\002 \001(\0132\031.google.protobuf.Duration" + + "\022\014\n\004word\030\003 \001(\t\022\027\n\nconfidence\030\004 \001(\002B\003\340A\003\022" + + "\030\n\013speaker_tag\030\005 
\001(\005B\003\340A\003\"(\n\020NormalizedV" + + "ertex\022\t\n\001x\030\001 \001(\002\022\t\n\001y\030\002 \001(\002\"_\n\026Normalize" + + "dBoundingPoly\022E\n\010vertices\030\001 \003(\01323.google" + + ".cloud.videointelligence.v1.NormalizedVe" + + "rtex\"\241\001\n\013TextSegment\022@\n\007segment\030\001 \001(\0132/." + "google.cloud.videointelligence.v1.VideoS" - + "egment\"p\n\025AnnotateVideoProgress\022W\n\023annot" - + "ation_progress\030\001 \003(\0132:.google.cloud.vide" - + "ointelligence.v1.VideoAnnotationProgress" - + "\"\201\003\n\031SpeechTranscriptionConfig\022\032\n\rlangua" - + "ge_code\030\001 \001(\tB\003\340A\002\022\035\n\020max_alternatives\030\002" - + " \001(\005B\003\340A\001\022\035\n\020filter_profanity\030\003 \001(\010B\003\340A\001" - + "\022N\n\017speech_contexts\030\004 \003(\01320.google.cloud" - + ".videointelligence.v1.SpeechContextB\003\340A\001" - + "\022)\n\034enable_automatic_punctuation\030\005 \001(\010B\003" - + "\340A\001\022\031\n\014audio_tracks\030\006 \003(\005B\003\340A\001\022\'\n\032enable" - + "_speaker_diarization\030\007 \001(\010B\003\340A\001\022&\n\031diari" - + "zation_speaker_count\030\010 \001(\005B\003\340A\001\022#\n\026enabl" - + "e_word_confidence\030\t \001(\010B\003\340A\001\"%\n\rSpeechCo" - + "ntext\022\024\n\007phrases\030\001 \003(\tB\003\340A\001\"\210\001\n\023SpeechTr" - + "anscription\022U\n\014alternatives\030\001 \003(\0132?.goog" - + "le.cloud.videointelligence.v1.SpeechReco" - + "gnitionAlternative\022\032\n\rlanguage_code\030\002 \001(" - + "\tB\003\340A\003\"\214\001\n\034SpeechRecognitionAlternative\022" - + "\022\n\ntranscript\030\001 \001(\t\022\027\n\nconfidence\030\002 \001(\002B" - + "\003\340A\003\022?\n\005words\030\003 \003(\0132+.google.cloud.video" - + "intelligence.v1.WordInfoB\003\340A\003\"\247\001\n\010WordIn" - + "fo\022-\n\nstart_time\030\001 \001(\0132\031.google.protobuf" - + ".Duration\022+\n\010end_time\030\002 \001(\0132\031.google.pro" - + 
"tobuf.Duration\022\014\n\004word\030\003 \001(\t\022\027\n\nconfiden" - + "ce\030\004 \001(\002B\003\340A\003\022\030\n\013speaker_tag\030\005 \001(\005B\003\340A\003\"" - + "(\n\020NormalizedVertex\022\t\n\001x\030\001 \001(\002\022\t\n\001y\030\002 \001(" - + "\002\"_\n\026NormalizedBoundingPoly\022E\n\010vertices\030" - + "\001 \003(\01323.google.cloud.videointelligence.v" - + "1.NormalizedVertex\"\241\001\n\013TextSegment\022@\n\007se" - + "gment\030\001 \001(\0132/.google.cloud.videointellig" - + "ence.v1.VideoSegment\022\022\n\nconfidence\030\002 \001(\002" - + "\022<\n\006frames\030\003 \003(\0132,.google.cloud.videoint" - + "elligence.v1.TextFrame\"\224\001\n\tTextFrame\022W\n\024" - + "rotated_bounding_box\030\001 \001(\01329.google.clou" - + "d.videointelligence.v1.NormalizedBoundin" - + "gPoly\022.\n\013time_offset\030\002 \001(\0132\031.google.prot" - + "obuf.Duration\"`\n\016TextAnnotation\022\014\n\004text\030" - + "\001 \001(\t\022@\n\010segments\030\002 \003(\0132..google.cloud.v" - + "ideointelligence.v1.TextSegment\"\240\001\n\023Obje" - + "ctTrackingFrame\022Y\n\027normalized_bounding_b" - + "ox\030\001 \001(\01328.google.cloud.videointelligenc" - + "e.v1.NormalizedBoundingBox\022.\n\013time_offse" - + "t\030\002 \001(\0132\031.google.protobuf.Duration\"\227\002\n\030O" - + "bjectTrackingAnnotation\022B\n\007segment\030\003 \001(\013" - + "2/.google.cloud.videointelligence.v1.Vid" - + "eoSegmentH\000\022\022\n\010track_id\030\005 \001(\003H\000\0229\n\006entit" - + "y\030\001 \001(\0132).google.cloud.videointelligence" - + ".v1.Entity\022\022\n\nconfidence\030\004 \001(\002\022F\n\006frames" - + "\030\002 \003(\01326.google.cloud.videointelligence." 
- + "v1.ObjectTrackingFrameB\014\n\ntrack_info\"\323\001\n" - + "\031LogoRecognitionAnnotation\0229\n\006entity\030\001 \001" - + "(\0132).google.cloud.videointelligence.v1.E" - + "ntity\0228\n\006tracks\030\002 \003(\0132(.google.cloud.vid" - + "eointelligence.v1.Track\022A\n\010segments\030\003 \003(" - + "\0132/.google.cloud.videointelligence.v1.Vi" - + "deoSegment*\337\001\n\007Feature\022\027\n\023FEATURE_UNSPEC" - + "IFIED\020\000\022\023\n\017LABEL_DETECTION\020\001\022\031\n\025SHOT_CHA" - + "NGE_DETECTION\020\002\022\036\n\032EXPLICIT_CONTENT_DETE" - + "CTION\020\003\022\022\n\016FACE_DETECTION\020\004\022\030\n\024SPEECH_TR" - + "ANSCRIPTION\020\006\022\022\n\016TEXT_DETECTION\020\007\022\023\n\017OBJ" - + "ECT_TRACKING\020\t\022\024\n\020LOGO_RECOGNITION\020\014*r\n\022" - + "LabelDetectionMode\022$\n LABEL_DETECTION_MO" - + "DE_UNSPECIFIED\020\000\022\r\n\tSHOT_MODE\020\001\022\016\n\nFRAME" - + "_MODE\020\002\022\027\n\023SHOT_AND_FRAME_MODE\020\003*t\n\nLike" - + "lihood\022\032\n\026LIKELIHOOD_UNSPECIFIED\020\000\022\021\n\rVE" - + "RY_UNLIKELY\020\001\022\014\n\010UNLIKELY\020\002\022\014\n\010POSSIBLE\020" - + "\003\022\n\n\006LIKELY\020\004\022\017\n\013VERY_LIKELY\020\0052\300\002\n\030Video" - + "IntelligenceService\022\315\001\n\rAnnotateVideo\0227." 
- + "google.cloud.videointelligence.v1.Annota" - + "teVideoRequest\032\035.google.longrunning.Oper" - + "ation\"d\202\323\344\223\002\030\"\023/v1/videos:annotate:\001*\332A\022" - + "input_uri,features\312A.\n\025AnnotateVideoResp" - + "onse\022\025AnnotateVideoProgress\032T\312A videoint" - + "elligence.googleapis.com\322A.https://www.g" - + "oogleapis.com/auth/cloud-platformB\213\002\n%co" - + "m.google.cloud.videointelligence.v1B\035Vid" - + "eoIntelligenceServiceProtoP\001ZRgoogle.gol" - + "ang.org/genproto/googleapis/cloud/videoi" - + "ntelligence/v1;videointelligence\252\002!Googl" - + "e.Cloud.VideoIntelligence.V1\312\002!Google\\Cl" - + "oud\\VideoIntelligence\\V1\352\002$Google::Cloud" - + "::VideoIntelligence::V1b\006proto3" + + "egment\022\022\n\nconfidence\030\002 \001(\002\022<\n\006frames\030\003 \003" + + "(\0132,.google.cloud.videointelligence.v1.T" + + "extFrame\"\224\001\n\tTextFrame\022W\n\024rotated_boundi" + + "ng_box\030\001 \001(\01329.google.cloud.videointelli" + + "gence.v1.NormalizedBoundingPoly\022.\n\013time_" + + "offset\030\002 \001(\0132\031.google.protobuf.Duration\"" + + "q\n\016TextAnnotation\022\014\n\004text\030\001 \001(\t\022@\n\010segme" + + "nts\030\002 \003(\0132..google.cloud.videointelligen" + + "ce.v1.TextSegment\022\017\n\007version\030\003 \001(\t\"\240\001\n\023O" + + "bjectTrackingFrame\022Y\n\027normalized_boundin" + + "g_box\030\001 \001(\01328.google.cloud.videointellig" + + "ence.v1.NormalizedBoundingBox\022.\n\013time_of" + + "fset\030\002 \001(\0132\031.google.protobuf.Duration\"\250\002" + + "\n\030ObjectTrackingAnnotation\022B\n\007segment\030\003 " + + "\001(\0132/.google.cloud.videointelligence.v1." 
+ + "VideoSegmentH\000\022\022\n\010track_id\030\005 \001(\003H\000\0229\n\006en" + + "tity\030\001 \001(\0132).google.cloud.videointellige" + + "nce.v1.Entity\022\022\n\nconfidence\030\004 \001(\002\022F\n\006fra" + + "mes\030\002 \003(\01326.google.cloud.videointelligen" + + "ce.v1.ObjectTrackingFrame\022\017\n\007version\030\006 \001" + + "(\tB\014\n\ntrack_info\"\323\001\n\031LogoRecognitionAnno" + + "tation\0229\n\006entity\030\001 \001(\0132).google.cloud.vi" + + "deointelligence.v1.Entity\0228\n\006tracks\030\002 \003(" + + "\0132(.google.cloud.videointelligence.v1.Tr" + + "ack\022A\n\010segments\030\003 \003(\0132/.google.cloud.vid" + + "eointelligence.v1.VideoSegment*\365\001\n\007Featu" + + "re\022\027\n\023FEATURE_UNSPECIFIED\020\000\022\023\n\017LABEL_DET" + + "ECTION\020\001\022\031\n\025SHOT_CHANGE_DETECTION\020\002\022\036\n\032E" + + "XPLICIT_CONTENT_DETECTION\020\003\022\022\n\016FACE_DETE" + + "CTION\020\004\022\030\n\024SPEECH_TRANSCRIPTION\020\006\022\022\n\016TEX" + + "T_DETECTION\020\007\022\023\n\017OBJECT_TRACKING\020\t\022\024\n\020LO" + + "GO_RECOGNITION\020\014\022\024\n\020PERSON_DETECTION\020\016*r" + + "\n\022LabelDetectionMode\022$\n LABEL_DETECTION_" + + "MODE_UNSPECIFIED\020\000\022\r\n\tSHOT_MODE\020\001\022\016\n\nFRA" + + "ME_MODE\020\002\022\027\n\023SHOT_AND_FRAME_MODE\020\003*t\n\nLi" + + "kelihood\022\032\n\026LIKELIHOOD_UNSPECIFIED\020\000\022\021\n\r" + + "VERY_UNLIKELY\020\001\022\014\n\010UNLIKELY\020\002\022\014\n\010POSSIBL" + + "E\020\003\022\n\n\006LIKELY\020\004\022\017\n\013VERY_LIKELY\020\0052\300\002\n\030Vid" + + "eoIntelligenceService\022\315\001\n\rAnnotateVideo\022" + + "7.google.cloud.videointelligence.v1.Anno" + + "tateVideoRequest\032\035.google.longrunning.Op" + + "eration\"d\202\323\344\223\002\030\"\023/v1/videos:annotate:\001*\332" + + "A\022input_uri,features\312A.\n\025AnnotateVideoRe" + + "sponse\022\025AnnotateVideoProgress\032T\312A videoi" + + "ntelligence.googleapis.com\322A.https://www" + + 
".googleapis.com/auth/cloud-platformB\213\002\n%" + + "com.google.cloud.videointelligence.v1B\035V" + + "ideoIntelligenceServiceProtoP\001ZRgoogle.g" + + "olang.org/genproto/googleapis/cloud/vide" + + "ointelligence/v1;videointelligence\252\002!Goo" + + "gle.Cloud.VideoIntelligence.V1\312\002!Google\\" + + "Cloud\\VideoIntelligence\\V1\352\002$Google::Clo" + + "ud::VideoIntelligence::V1b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -451,6 +480,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "FaceDetectionConfig", "SpeechTranscriptionConfig", "TextDetectionConfig", + "PersonDetectionConfig", "ObjectTrackingConfig", }); internal_static_google_cloud_videointelligence_v1_LabelDetectionConfig_descriptor = @@ -487,10 +517,18 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_FaceDetectionConfig_descriptor, new java.lang.String[] { - "Model", "IncludeBoundingBoxes", + "Model", "IncludeBoundingBoxes", "IncludeAttributes", }); - internal_static_google_cloud_videointelligence_v1_ExplicitContentDetectionConfig_descriptor = + internal_static_google_cloud_videointelligence_v1_PersonDetectionConfig_descriptor = getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_videointelligence_v1_PersonDetectionConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_videointelligence_v1_PersonDetectionConfig_descriptor, + new java.lang.String[] { + "IncludeBoundingBoxes", "IncludePoseLandmarks", "IncludeAttributes", + }); + internal_static_google_cloud_videointelligence_v1_ExplicitContentDetectionConfig_descriptor = + getDescriptor().getMessageTypes().get(7); internal_static_google_cloud_videointelligence_v1_ExplicitContentDetectionConfig_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_ExplicitContentDetectionConfig_descriptor, @@ -498,7 +536,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Model", }); internal_static_google_cloud_videointelligence_v1_TextDetectionConfig_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(8); internal_static_google_cloud_videointelligence_v1_TextDetectionConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_TextDetectionConfig_descriptor, @@ -506,7 +544,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "LanguageHints", "Model", }); internal_static_google_cloud_videointelligence_v1_VideoSegment_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(9); internal_static_google_cloud_videointelligence_v1_VideoSegment_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_VideoSegment_descriptor, @@ -514,7 +552,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "StartTimeOffset", "EndTimeOffset", }); internal_static_google_cloud_videointelligence_v1_LabelSegment_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(10); internal_static_google_cloud_videointelligence_v1_LabelSegment_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_LabelSegment_descriptor, @@ -522,7 +560,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Segment", "Confidence", }); internal_static_google_cloud_videointelligence_v1_LabelFrame_descriptor = - getDescriptor().getMessageTypes().get(10); + 
getDescriptor().getMessageTypes().get(11); internal_static_google_cloud_videointelligence_v1_LabelFrame_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_LabelFrame_descriptor, @@ -530,7 +568,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "TimeOffset", "Confidence", }); internal_static_google_cloud_videointelligence_v1_Entity_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(12); internal_static_google_cloud_videointelligence_v1_Entity_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_Entity_descriptor, @@ -538,15 +576,15 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "EntityId", "Description", "LanguageCode", }); internal_static_google_cloud_videointelligence_v1_LabelAnnotation_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(13); internal_static_google_cloud_videointelligence_v1_LabelAnnotation_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_LabelAnnotation_descriptor, new java.lang.String[] { - "Entity", "CategoryEntities", "Segments", "Frames", + "Entity", "CategoryEntities", "Segments", "Frames", "Version", }); internal_static_google_cloud_videointelligence_v1_ExplicitContentFrame_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(14); internal_static_google_cloud_videointelligence_v1_ExplicitContentFrame_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_ExplicitContentFrame_descriptor, @@ -554,23 +592,39 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "TimeOffset", 
"PornographyLikelihood", }); internal_static_google_cloud_videointelligence_v1_ExplicitContentAnnotation_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(15); internal_static_google_cloud_videointelligence_v1_ExplicitContentAnnotation_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_ExplicitContentAnnotation_descriptor, new java.lang.String[] { - "Frames", + "Frames", "Version", }); internal_static_google_cloud_videointelligence_v1_NormalizedBoundingBox_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(16); internal_static_google_cloud_videointelligence_v1_NormalizedBoundingBox_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_NormalizedBoundingBox_descriptor, new java.lang.String[] { "Left", "Top", "Right", "Bottom", }); + internal_static_google_cloud_videointelligence_v1_FaceDetectionAnnotation_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_google_cloud_videointelligence_v1_FaceDetectionAnnotation_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_videointelligence_v1_FaceDetectionAnnotation_descriptor, + new java.lang.String[] { + "Version", + }); + internal_static_google_cloud_videointelligence_v1_PersonDetectionAnnotation_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_google_cloud_videointelligence_v1_PersonDetectionAnnotation_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_videointelligence_v1_PersonDetectionAnnotation_descriptor, + new java.lang.String[] { + "Tracks", "Version", + }); internal_static_google_cloud_videointelligence_v1_FaceSegment_descriptor = - getDescriptor().getMessageTypes().get(16); + 
getDescriptor().getMessageTypes().get(19); internal_static_google_cloud_videointelligence_v1_FaceSegment_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_FaceSegment_descriptor, @@ -578,7 +632,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Segment", }); internal_static_google_cloud_videointelligence_v1_FaceFrame_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(20); internal_static_google_cloud_videointelligence_v1_FaceFrame_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_FaceFrame_descriptor, @@ -586,7 +640,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "NormalizedBoundingBoxes", "TimeOffset", }); internal_static_google_cloud_videointelligence_v1_FaceAnnotation_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(21); internal_static_google_cloud_videointelligence_v1_FaceAnnotation_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_FaceAnnotation_descriptor, @@ -594,7 +648,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Thumbnail", "Segments", "Frames", }); internal_static_google_cloud_videointelligence_v1_TimestampedObject_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(22); internal_static_google_cloud_videointelligence_v1_TimestampedObject_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_TimestampedObject_descriptor, @@ -602,7 +656,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "NormalizedBoundingBox", "TimeOffset", "Attributes", "Landmarks", 
}); internal_static_google_cloud_videointelligence_v1_Track_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(23); internal_static_google_cloud_videointelligence_v1_Track_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_Track_descriptor, @@ -610,7 +664,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Segment", "TimestampedObjects", "Attributes", "Confidence", }); internal_static_google_cloud_videointelligence_v1_DetectedAttribute_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(24); internal_static_google_cloud_videointelligence_v1_DetectedAttribute_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_DetectedAttribute_descriptor, @@ -618,7 +672,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Name", "Confidence", "Value", }); internal_static_google_cloud_videointelligence_v1_DetectedLandmark_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(25); internal_static_google_cloud_videointelligence_v1_DetectedLandmark_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_DetectedLandmark_descriptor, @@ -626,7 +680,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Name", "Point", "Confidence", }); internal_static_google_cloud_videointelligence_v1_VideoAnnotationResults_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(26); internal_static_google_cloud_videointelligence_v1_VideoAnnotationResults_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_google_cloud_videointelligence_v1_VideoAnnotationResults_descriptor, @@ -639,16 +693,18 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ShotPresenceLabelAnnotations", "FrameLabelAnnotations", "FaceAnnotations", + "FaceDetectionAnnotations", "ShotAnnotations", "ExplicitAnnotation", "SpeechTranscriptions", "TextAnnotations", "ObjectAnnotations", "LogoRecognitionAnnotations", + "PersonDetectionAnnotations", "Error", }); internal_static_google_cloud_videointelligence_v1_AnnotateVideoResponse_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(27); internal_static_google_cloud_videointelligence_v1_AnnotateVideoResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_AnnotateVideoResponse_descriptor, @@ -656,7 +712,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "AnnotationResults", }); internal_static_google_cloud_videointelligence_v1_VideoAnnotationProgress_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(28); internal_static_google_cloud_videointelligence_v1_VideoAnnotationProgress_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_VideoAnnotationProgress_descriptor, @@ -664,7 +720,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "InputUri", "ProgressPercent", "StartTime", "UpdateTime", "Feature", "Segment", }); internal_static_google_cloud_videointelligence_v1_AnnotateVideoProgress_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(29); internal_static_google_cloud_videointelligence_v1_AnnotateVideoProgress_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_google_cloud_videointelligence_v1_AnnotateVideoProgress_descriptor, @@ -672,7 +728,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "AnnotationProgress", }); internal_static_google_cloud_videointelligence_v1_SpeechTranscriptionConfig_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(30); internal_static_google_cloud_videointelligence_v1_SpeechTranscriptionConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_SpeechTranscriptionConfig_descriptor, @@ -688,7 +744,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "EnableWordConfidence", }); internal_static_google_cloud_videointelligence_v1_SpeechContext_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(31); internal_static_google_cloud_videointelligence_v1_SpeechContext_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_SpeechContext_descriptor, @@ -696,7 +752,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Phrases", }); internal_static_google_cloud_videointelligence_v1_SpeechTranscription_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(32); internal_static_google_cloud_videointelligence_v1_SpeechTranscription_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_SpeechTranscription_descriptor, @@ -704,7 +760,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Alternatives", "LanguageCode", }); internal_static_google_cloud_videointelligence_v1_SpeechRecognitionAlternative_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(33); 
internal_static_google_cloud_videointelligence_v1_SpeechRecognitionAlternative_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_SpeechRecognitionAlternative_descriptor, @@ -712,7 +768,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Transcript", "Confidence", "Words", }); internal_static_google_cloud_videointelligence_v1_WordInfo_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(34); internal_static_google_cloud_videointelligence_v1_WordInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_WordInfo_descriptor, @@ -720,7 +776,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "StartTime", "EndTime", "Word", "Confidence", "SpeakerTag", }); internal_static_google_cloud_videointelligence_v1_NormalizedVertex_descriptor = - getDescriptor().getMessageTypes().get(32); + getDescriptor().getMessageTypes().get(35); internal_static_google_cloud_videointelligence_v1_NormalizedVertex_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_NormalizedVertex_descriptor, @@ -728,7 +784,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "X", "Y", }); internal_static_google_cloud_videointelligence_v1_NormalizedBoundingPoly_descriptor = - getDescriptor().getMessageTypes().get(33); + getDescriptor().getMessageTypes().get(36); internal_static_google_cloud_videointelligence_v1_NormalizedBoundingPoly_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_NormalizedBoundingPoly_descriptor, @@ -736,7 +792,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Vertices", }); 
internal_static_google_cloud_videointelligence_v1_TextSegment_descriptor = - getDescriptor().getMessageTypes().get(34); + getDescriptor().getMessageTypes().get(37); internal_static_google_cloud_videointelligence_v1_TextSegment_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_TextSegment_descriptor, @@ -744,7 +800,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Segment", "Confidence", "Frames", }); internal_static_google_cloud_videointelligence_v1_TextFrame_descriptor = - getDescriptor().getMessageTypes().get(35); + getDescriptor().getMessageTypes().get(38); internal_static_google_cloud_videointelligence_v1_TextFrame_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_TextFrame_descriptor, @@ -752,15 +808,15 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "RotatedBoundingBox", "TimeOffset", }); internal_static_google_cloud_videointelligence_v1_TextAnnotation_descriptor = - getDescriptor().getMessageTypes().get(36); + getDescriptor().getMessageTypes().get(39); internal_static_google_cloud_videointelligence_v1_TextAnnotation_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_TextAnnotation_descriptor, new java.lang.String[] { - "Text", "Segments", + "Text", "Segments", "Version", }); internal_static_google_cloud_videointelligence_v1_ObjectTrackingFrame_descriptor = - getDescriptor().getMessageTypes().get(37); + getDescriptor().getMessageTypes().get(40); internal_static_google_cloud_videointelligence_v1_ObjectTrackingFrame_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_ObjectTrackingFrame_descriptor, @@ -768,15 +824,15 @@ public static 
com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "NormalizedBoundingBox", "TimeOffset", }); internal_static_google_cloud_videointelligence_v1_ObjectTrackingAnnotation_descriptor = - getDescriptor().getMessageTypes().get(38); + getDescriptor().getMessageTypes().get(41); internal_static_google_cloud_videointelligence_v1_ObjectTrackingAnnotation_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_ObjectTrackingAnnotation_descriptor, new java.lang.String[] { - "Segment", "TrackId", "Entity", "Confidence", "Frames", "TrackInfo", + "Segment", "TrackId", "Entity", "Confidence", "Frames", "Version", "TrackInfo", }); internal_static_google_cloud_videointelligence_v1_LogoRecognitionAnnotation_descriptor = - getDescriptor().getMessageTypes().get(39); + getDescriptor().getMessageTypes().get(42); internal_static_google_cloud_videointelligence_v1_LogoRecognitionAnnotation_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_videointelligence_v1_LogoRecognitionAnnotation_descriptor, diff --git a/proto-google-cloud-video-intelligence-v1/src/main/proto/google/cloud/videointelligence/v1/video_intelligence.proto b/proto-google-cloud-video-intelligence-v1/src/main/proto/google/cloud/videointelligence/v1/video_intelligence.proto index 9dcfda55e..5b3a3e343 100644 --- a/proto-google-cloud-video-intelligence-v1/src/main/proto/google/cloud/videointelligence/v1/video_intelligence.proto +++ b/proto-google-cloud-video-intelligence-v1/src/main/proto/google/cloud/videointelligence/v1/video_intelligence.proto @@ -32,16 +32,18 @@ option java_package = "com.google.cloud.videointelligence.v1"; option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1"; option ruby_package = "Google::Cloud::VideoIntelligence::V1"; -// Service that implements Google Cloud Video Intelligence API. +// Service that implements the Video Intelligence API. 
service VideoIntelligenceService { option (google.api.default_host) = "videointelligence.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; // Performs asynchronous video annotation. Progress and results can be // retrieved through the `google.longrunning.Operations` interface. // `Operation.metadata` contains `AnnotateVideoProgress` (progress). // `Operation.response` contains `AnnotateVideoResponse` (results). - rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { + rpc AnnotateVideo(AnnotateVideoRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/videos:annotate" body: "*" @@ -57,20 +59,21 @@ service VideoIntelligenceService { // Video annotation request. message AnnotateVideoRequest { // Input video location. Currently, only - // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are - // supported, which must be specified in the following format: + // [Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported. URIs must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see - // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). - // A video URI may include wildcards in `object-id`, and thus identify - // multiple videos. Supported wildcards: '*' to match 0 or more characters; + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify + // multiple videos, a video URI may include wildcards in the `object-id`. + // Supported wildcards: '*' to match 0 or more characters; // '?' to match 1 character. 
If unset, the input video should be embedded - // in the request as `input_content`. If set, `input_content` should be unset. + // in the request as `input_content`. If set, `input_content` must be unset. string input_uri = 1; // The video data bytes. - // If unset, the input video(s) should be specified via `input_uri`. - // If set, `input_uri` should be unset. + // If unset, the input video(s) should be specified via the `input_uri`. + // If set, `input_uri` must be unset. bytes input_content = 6; // Required. Requested video annotation features. @@ -80,16 +83,18 @@ message AnnotateVideoRequest { VideoContext video_context = 3; // Optional. Location where the output (in JSON format) should be stored. - // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) - // URIs are supported, which must be specified in the following format: + // Currently, only [Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported. These must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see - // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. Cloud region where annotation should take place. Supported cloud - // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - // is specified, a region will be determined based on video file location. + // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no + // region is specified, the region will be determined based on video file + // location. 
string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; } @@ -118,6 +123,9 @@ message VideoContext { // Config for TEXT_DETECTION. TextDetectionConfig text_detection_config = 8; + // Config for PERSON_DETECTION. + PersonDetectionConfig person_detection_config = 11; + // Config for OBJECT_TRACKING. ObjectTrackingConfig object_tracking_config = 13; } @@ -136,7 +144,7 @@ enum Feature { // Explicit content detection. EXPLICIT_CONTENT_DETECTION = 3; - // Human face detection and tracking. + // Human face detection. FACE_DETECTION = 4; // Speech transcription. @@ -150,6 +158,9 @@ enum Feature { // Logo detection, tracking, and recognition. LOGO_RECOGNITION = 12; + + // Person detection. + PERSON_DETECTION = 14; } // Label detection mode. @@ -195,9 +206,9 @@ message LabelDetectionConfig { // If unspecified, defaults to `SHOT_MODE`. LabelDetectionMode label_detection_mode = 1; - // Whether the video has been shot from a stationary (i.e. non-moving) camera. - // When set to true, might improve detection accuracy for moving objects. - // Should be used with `SHOT_AND_FRAME_MODE` enabled. + // Whether the video has been shot from a stationary (i.e., non-moving) + // camera. When set to true, might improve detection accuracy for moving + // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled. bool stationary_camera = 2; // Model to use for label detection. @@ -209,15 +220,15 @@ message LabelDetectionConfig { // frame-level detection. If not set, it is set to 0.4 by default. The valid // range for this threshold is [0.1, 0.9]. Any value set outside of this // range will be clipped. - // Note: for best results please follow the default threshold. We will update + // Note: For best results, follow the default threshold. We will update // the default threshold everytime when we release a new model. float frame_confidence_threshold = 4; // The confidence threshold we perform filtering on the labels from - // video-level and shot-level detections. 
If not set, it is set to 0.3 by + // video-level and shot-level detections. If not set, it's set to 0.3 by // default. The valid range for this threshold is [0.1, 0.9]. Any value set // outside of this range will be clipped. - // Note: for best results please follow the default threshold. We will update + // Note: For best results, follow the default threshold. We will update // the default threshold everytime when we release a new model. float video_confidence_threshold = 5; } @@ -245,8 +256,29 @@ message FaceDetectionConfig { // "builtin/latest". string model = 1; - // Whether bounding boxes be included in the face annotation output. + // Whether bounding boxes are included in the face annotation output. bool include_bounding_boxes = 2; + + // Whether to enable face attributes detection, such as glasses, dark_glasses, + // mouth_open etc. Ignored if 'include_bounding_boxes' is set to false. + bool include_attributes = 5; +} + +// Config for PERSON_DETECTION. +message PersonDetectionConfig { + // Whether bounding boxes are included in the person detection annotation + // output. + bool include_bounding_boxes = 1; + + // Whether to enable pose landmarks detection. Ignored if + // 'include_bounding_boxes' is set to false. + bool include_pose_landmarks = 2; + + // Whether to enable person attributes detection, such as cloth color (black, + // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, + // etc. + // Ignored if 'include_bounding_boxes' is set to false. + bool include_attributes = 3; } // Config for EXPLICIT_CONTENT_DETECTION. @@ -309,7 +341,7 @@ message Entity { // API](https://developers.google.com/knowledge-graph/). string entity_id = 1; - // Textual description, e.g. `Fixed-gear bicycle`. + // Textual description, e.g., `Fixed-gear bicycle`. string description = 2; // Language code for `description` in BCP-47 format. @@ -322,9 +354,9 @@ message LabelAnnotation { Entity entity = 1; // Common categories for the detected entity. - // E.g. 
when the label is `Terrier` the category is likely `dog`. And in some - // cases there might be more than one categories e.g. `Terrier` could also be - // a `pet`. + // For example, when the label is `Terrier`, the category is likely `dog`. And + // in some cases there might be more than one categories e.g., `Terrier` could + // also be a `pet`. repeated Entity category_entities = 2; // All video segments where a label was detected. @@ -332,6 +364,9 @@ message LabelAnnotation { // All video frames where a label was detected. repeated LabelFrame frames = 4; + + // Feature version. + string version = 5; } // Video frame level annotation results for explicit content. @@ -350,6 +385,9 @@ message ExplicitContentFrame { message ExplicitContentAnnotation { // All video frames where explicit content was detected. repeated ExplicitContentFrame frames = 1; + + // Feature version. + string version = 2; } // Normalized bounding box. @@ -369,14 +407,31 @@ message NormalizedBoundingBox { float bottom = 4; } +// Face detection annotation. +message FaceDetectionAnnotation { + // Feature version. + string version = 5; +} + +// Person detection annotation per video. +message PersonDetectionAnnotation { + // The detected tracks of a person. + repeated Track tracks = 1; + + // Feature version. + string version = 2; +} + // Video segment level annotation results for face detection. message FaceSegment { // Video segment where a face was detected. VideoSegment segment = 1; } -// Video frame level annotation results for face detection. +// Deprecated. No effect. message FaceFrame { + option deprecated = true; + // Normalized Bounding boxes in a frame. // There can be more than one boxes if the same face is detected in multiple // locations within the current frame. @@ -387,8 +442,10 @@ message FaceFrame { google.protobuf.Duration time_offset = 2; } -// Face annotation. +// Deprecated. No effect. 
message FaceAnnotation { + option deprecated = true; + // Thumbnail of a representative face view (in JPEG format). bytes thumbnail = 1; @@ -411,10 +468,12 @@ message TimestampedObject { google.protobuf.Duration time_offset = 2; // Optional. The attributes of the object in the bounding box. - repeated DetectedAttribute attributes = 3 [(google.api.field_behavior) = OPTIONAL]; + repeated DetectedAttribute attributes = 3 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The detected landmarks. - repeated DetectedLandmark landmarks = 4 [(google.api.field_behavior) = OPTIONAL]; + repeated DetectedLandmark landmarks = 4 + [(google.api.field_behavior) = OPTIONAL]; } // A track of an object instance. @@ -426,7 +485,8 @@ message Track { repeated TimestampedObject timestamped_objects = 2; // Optional. Attributes in the track level. - repeated DetectedAttribute attributes = 3 [(google.api.field_behavior) = OPTIONAL]; + repeated DetectedAttribute attributes = 3 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The confidence score of the tracked object. float confidence = 4 [(google.api.field_behavior) = OPTIONAL]; @@ -434,7 +494,7 @@ message Track { // A generic detected attribute represented by name in string format. message DetectedAttribute { - // The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc. + // The name of the attribute, for example, glasses, dark_glasses, mouth_open. // A full list of supported type names will be provided in the document. string name = 1; @@ -449,7 +509,7 @@ message DetectedAttribute { // A generic detected landmark represented by name in string format and a 2D // location. message DetectedLandmark { - // The name of this landmark, i.e. left_hand, right_shoulder. + // The name of this landmark, for example, left_hand, right_shoulder. string name = 1; // The 2D point of the detected landmark using the normalized image @@ -463,17 +523,17 @@ message DetectedLandmark { // Annotation results for a single video. 
message VideoAnnotationResults { // Video file location in - // [Google Cloud Storage](https://cloud.google.com/storage/). + // [Cloud Storage](https://cloud.google.com/storage/). string input_uri = 1; // Video segment on which the annotation is run. VideoSegment segment = 10; - // Topical label annotations on video level or user specified segment level. + // Topical label annotations on video level or user-specified segment level. // There is exactly one element for each unique label. repeated LabelAnnotation segment_label_annotations = 2; - // Presence label annotations on video level or user specified segment level. + // Presence label annotations on video level or user-specified segment level. // There is exactly one element for each unique label. Compared to the // existing topical `segment_label_annotations`, this field presents more // fine-grained, segment-level labels detected in video content and is made @@ -496,8 +556,11 @@ message VideoAnnotationResults { // There is exactly one element for each unique label. repeated LabelAnnotation frame_label_annotations = 4; - // Face annotations. There is exactly one element for each unique face. - repeated FaceAnnotation face_annotations = 5; + // Deprecated. Please use `face_detection_annotations` instead. + repeated FaceAnnotation face_annotations = 5 [deprecated = true]; + + // Face detection annotations. + repeated FaceDetectionAnnotation face_detection_annotations = 13; // Shot annotations. Each shot is represented as a video segment. repeated VideoSegment shot_annotations = 6; @@ -519,6 +582,9 @@ message VideoAnnotationResults { // Annotations for list of logos detected, tracked and recognized in video. repeated LogoRecognitionAnnotation logo_recognition_annotations = 19; + // Person detection annotations. + repeated PersonDetectionAnnotation person_detection_annotations = 20; + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` // some videos may succeed and some may fail. 
google.rpc.Status error = 9; @@ -535,7 +601,7 @@ message AnnotateVideoResponse { // Annotation progress for a single video. message VideoAnnotationProgress { // Video file location in - // [Google Cloud Storage](https://cloud.google.com/storage/). + // [Cloud Storage](https://cloud.google.com/storage/). string input_uri = 1; // Approximate percentage processed thus far. Guaranteed to be @@ -549,11 +615,11 @@ message VideoAnnotationProgress { google.protobuf.Timestamp update_time = 4; // Specifies which feature is being tracked if the request contains more than - // one features. + // one feature. Feature feature = 5; // Specifies which segment is being tracked if the request contains more than - // one segments. + // one segment. VideoSegment segment = 6; } @@ -588,7 +654,8 @@ message SpeechTranscriptionConfig { bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. A means to provide context to assist the speech recognition. - repeated SpeechContext speech_contexts = 4 [(google.api.field_behavior) = OPTIONAL]; + repeated SpeechContext speech_contexts = 4 + [(google.api.field_behavior) = OPTIONAL]; // Optional. If 'true', adds punctuation to recognition result hypotheses. // This feature is only available in select languages. Setting this for @@ -596,7 +663,8 @@ message SpeechTranscriptionConfig { // does not add punctuation to result hypotheses. NOTE: "This is currently // offered as an experimental service, complimentary to all users. In the // future this may be exclusively available as a premium feature." - bool enable_automatic_punctuation = 5 [(google.api.field_behavior) = OPTIONAL]; + bool enable_automatic_punctuation = 5 + [(google.api.field_behavior) = OPTIONAL]; // Optional. For file formats, such as MXF or MKV, supporting multiple audio // tracks, specify up to two tracks. Default: track 0. 
@@ -606,14 +674,14 @@ message SpeechTranscriptionConfig { // the top alternative of the recognition result using a speaker_tag provided // in the WordInfo. // Note: When this is true, we send all the words from the beginning of the - // audio for the top alternative in every consecutive responses. + // audio for the top alternative in every consecutive response. // This is done in order to improve our speaker tags as our models learn to // identify the speakers in the conversation over time. bool enable_speaker_diarization = 7 [(google.api.field_behavior) = OPTIONAL]; - // Optional. If set, specifies the estimated number of speakers in the conversation. - // If not set, defaults to '2'. - // Ignored unless enable_speaker_diarization is set to true. + // Optional. If set, specifies the estimated number of speakers in the + // conversation. If not set, defaults to '2'. Ignored unless + // enable_speaker_diarization is set to true. int32 diarization_speaker_count = 8 [(google.api.field_behavior) = OPTIONAL]; // Optional. If `true`, the top result includes a list of words and the @@ -642,9 +710,9 @@ message SpeechTranscription { // ranked by the recognizer. repeated SpeechRecognitionAlternative alternatives = 1; - // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of - // the language in this result. This language code was detected to have the - // most likelihood of being spoken in the audio. + // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) + // language tag of the language in this result. This language code was + // detected to have the most likelihood of being spoken in the audio. string language_code = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; } @@ -662,8 +730,8 @@ message SpeechRecognitionAlternative { float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. A list of word-specific information for each recognized word. 
- // Note: When `enable_speaker_diarization` is true, you will see all the words - // from the beginning of the audio. + // Note: When `enable_speaker_diarization` is set to true, you will see all + // the words from the beginning of the audio. repeated WordInfo words = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; } @@ -767,6 +835,9 @@ message TextAnnotation { // All video segments where OCR detected text appears. repeated TextSegment segments = 2; + + // Feature version. + string version = 3; } // Video frame level annotations for object detection and tracking. This field @@ -808,6 +879,9 @@ message ObjectTrackingAnnotation { // messages in frames. // Streaming mode: it can only be one ObjectTrackingFrame message in frames. repeated ObjectTrackingFrame frames = 2; + + // Feature version. + string version = 6; } // Annotation corresponding to one detected, tracked and recognized logo class. diff --git a/synth.metadata b/synth.metadata index 2f6c468b0..b9b2591d5 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/java-video-intelligence.git", - "sha": "fe80193375dae657b5d801e58e9387eaaf4ee8e9" + "sha": "8eb40c3d6eaa0302ed722889619ccac5107d649b" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "251d9358730b5cec7625d9f42879444a7d5369d5", - "internalRef": "316978059" + "sha": "e7375a91e7942c562c61495a908f4f6d50b4ea3f", + "internalRef": "334093713" } }, { @@ -128,7 +128,6 @@ "CODE_OF_CONDUCT.md", "CONTRIBUTING.md", "LICENSE", - "README.md", "codecov.yaml", "google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceClient.java", "google-cloud-video-intelligence/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceSettings.java", @@ -221,6 +220,8 @@ 
"proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ExplicitContentFrameOrBuilder.java", "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceAnnotation.java", "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceAnnotationOrBuilder.java", + "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionAnnotation.java", + "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionAnnotationOrBuilder.java", "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionConfig.java", "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceDetectionConfigOrBuilder.java", "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/FaceFrame.java", @@ -252,6 +253,10 @@ "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingConfigOrBuilder.java", "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingFrame.java", "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingFrameOrBuilder.java", + "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionAnnotation.java", + "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionAnnotationOrBuilder.java", + "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionConfig.java", + "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/PersonDetectionConfigOrBuilder.java", 
"proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ShotChangeDetectionConfig.java", "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ShotChangeDetectionConfigOrBuilder.java", "proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/SpeechContext.java",