diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 869311c94..baec78fae 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -10,7 +10,7 @@ Thanks for stopping by to let us know something could be better! Please run down the following list and make sure you've tried the usual "quick fixes": - - Search the issues already opened: https://github.com/googleapis/google-cloud-speect/issues + - Search the issues already opened: https://github.com/googleapis/java-speech/issues - Check for answers on StackOverflow: http://stackoverflow.com/questions/tagged/google-cloud-platform If you are still having issues, please include as much information as possible: @@ -48,4 +48,4 @@ Any relevant stacktrace here. Following these steps guarantees the quickest resolution possible. -Thanks! \ No newline at end of file +Thanks! diff --git a/.github/release-please.yml b/.github/release-please.yml new file mode 100644 index 000000000..827446828 --- /dev/null +++ b/.github/release-please.yml @@ -0,0 +1 @@ +releaseType: java-yoshi diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 2ffb5ef7f..bcd1e410c 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -39,6 +39,7 @@ case ${JOB_TYPE} in test) mvn test -B bash ${KOKORO_GFILE_DIR}/codecov.sh + bash .kokoro/coerce_logs.sh ;; lint) mvn com.coveo:fmt-maven-plugin:check @@ -48,6 +49,7 @@ javadoc) ;; integration) mvn -B ${INTEGRATION_TEST_ARGS} -DtrimStackTrace=false -fae verify + bash .kokoro/coerce_logs.sh ;; *) ;; diff --git a/.kokoro/coerce_logs.sh b/.kokoro/coerce_logs.sh new file mode 100755 index 000000000..5cf7ba49e --- /dev/null +++ b/.kokoro/coerce_logs.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script finds and moves sponge logs so that they can be found by placer +# and are not flagged as flaky by sponge. + +set -eo pipefail + +## Get the directory of the build script +scriptDir=$(realpath $(dirname "${BASH_SOURCE[0]}")) +## cd to the parent directory, i.e. the root of the git repo +cd ${scriptDir}/.. + +job=$(basename ${KOKORO_JOB_NAME}) + +echo "coercing sponge logs..." +for xml in `find . -name *-sponge_log.xml` +do + echo "processing ${xml}" + class=$(basename ${xml} | cut -d- -f2) + dir=$(dirname ${xml})/${job}/${class} + text=$(dirname ${xml})/${class}-sponge_log.txt + mkdir -p ${dir} + mv ${xml} ${dir}/sponge_log.xml + mv ${text} ${dir}/sponge_log.txt +done diff --git a/.kokoro/continuous/common.cfg b/.kokoro/continuous/common.cfg index 0949a0076..9356b7e85 100644 --- a/.kokoro/continuous/common.cfg +++ b/.kokoro/continuous/common.cfg @@ -4,6 +4,7 @@ action { define_artifacts { regex: "**/*sponge_log.xml" + regex: "**/*sponge_log.txt" } } diff --git a/.kokoro/nightly/common.cfg b/.kokoro/nightly/common.cfg index 0949a0076..9356b7e85 100644 --- a/.kokoro/nightly/common.cfg +++ b/.kokoro/nightly/common.cfg @@ -4,6 +4,7 @@ action { define_artifacts { regex: "**/*sponge_log.xml" + regex: "**/*sponge_log.txt" } } diff --git a/.kokoro/presubmit/common.cfg b/.kokoro/presubmit/common.cfg index f0d953770..139314517 100644 --- a/.kokoro/presubmit/common.cfg +++ b/.kokoro/presubmit/common.cfg @@ -4,6 +4,7 @@ action { define_artifacts { regex: "**/*sponge_log.xml" + regex: "**/*sponge_log.txt" } } diff --git a/.kokoro/release/snapshot.sh 
b/.kokoro/release/snapshot.sh index bf738c56d..098168a73 100755 --- a/.kokoro/release/snapshot.sh +++ b/.kokoro/release/snapshot.sh @@ -19,6 +19,9 @@ source $(dirname "$0")/common.sh MAVEN_SETTINGS_FILE=$(realpath $(dirname "$0")/../../)/settings.xml pushd $(dirname "$0")/../../ +# ensure we're trying to push a snapshot (no-result returns non-zero exit code) +grep SNAPSHOT versions.txt + setup_environment_secrets create_settings_xml_file "settings.xml" @@ -27,4 +30,4 @@ mvn clean install deploy -B \ -DperformRelease=true \ -Dgpg.executable=gpg \ -Dgpg.passphrase=${GPG_PASSPHRASE} \ - -Dgpg.homedir=${GPG_HOMEDIR} \ No newline at end of file + -Dgpg.homedir=${GPG_HOMEDIR} diff --git a/.kokoro/release/stage.sh b/.kokoro/release/stage.sh index b1b1b01c6..3c482cbc5 100755 --- a/.kokoro/release/stage.sh +++ b/.kokoro/release/stage.sh @@ -28,6 +28,7 @@ create_settings_xml_file "settings.xml" mvn clean install deploy -B \ --settings ${MAVEN_SETTINGS_FILE} \ + -DskipTests=true \ -DperformRelease=true \ -Dgpg.executable=gpg \ -Dgpg.passphrase=${GPG_PASSPHRASE} \ diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/SpeechClient.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/SpeechClient.java index 2740614db..d7a473d10 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/SpeechClient.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/SpeechClient.java @@ -196,9 +196,9 @@ public final OperationsClient getOperationsClient() { * } * * - * @param config *Required* Provides information to the recognizer that specifies how to - * process the request. - * @param audio *Required* The audio data to be recognized. + * @param config Required. Provides information to the recognizer that specifies how to process + * the request. + * @param audio Required. The audio data to be recognized. 
* @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final RecognizeResponse recognize(RecognitionConfig config, RecognitionAudio audio) { @@ -283,7 +283,8 @@ public final UnaryCallable recognizeCallabl /** * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations * interface. Returns either an `Operation.error` or an `Operation.response` which contains a - * `LongRunningRecognizeResponse` message. + * `LongRunningRecognizeResponse` message. For more information on asynchronous speech + * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * *

Sample code: * @@ -305,9 +306,9 @@ public final UnaryCallable recognizeCallabl * } * * - * @param config *Required* Provides information to the recognizer that specifies how to - * process the request. - * @param audio *Required* The audio data to be recognized. + * @param config Required. Provides information to the recognizer that specifies how to process + * the request. + * @param audio Required. The audio data to be recognized. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ @BetaApi( @@ -324,7 +325,8 @@ public final UnaryCallable recognizeCallabl /** * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations * interface. Returns either an `Operation.error` or an `Operation.response` which contains a - * `LongRunningRecognizeResponse` message. + * `LongRunningRecognizeResponse` message. For more information on asynchronous speech + * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * *

Sample code: * @@ -364,7 +366,8 @@ public final UnaryCallable recognizeCallabl /** * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations * interface. Returns either an `Operation.error` or an `Operation.response` which contains a - * `LongRunningRecognizeResponse` message. + * `LongRunningRecognizeResponse` message. For more information on asynchronous speech + * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * *

Sample code: * @@ -403,7 +406,8 @@ public final UnaryCallable recognizeCallabl /** * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations * interface. Returns either an `Operation.error` or an `Operation.response` which contains a - * `LongRunningRecognizeResponse` message. + * `LongRunningRecognizeResponse` message. For more information on asynchronous speech + * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * *

Sample code: * diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/package-info.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/package-info.java index 2ee8683d8..e5b7cccf0 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/package-info.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/package-info.java @@ -15,7 +15,7 @@ */ /** - * A client to Cloud Speech API. + * A client to Cloud Speech-to-Text API. * *

The interfaces provided are listed below, along with usage samples. * diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/GrpcSpeechCallableFactory.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/GrpcSpeechCallableFactory.java index de84ff3a5..bf7118c90 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/GrpcSpeechCallableFactory.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/GrpcSpeechCallableFactory.java @@ -36,7 +36,7 @@ // AUTO-GENERATED DOCUMENTATION AND CLASS /** - * gRPC callable factory implementation for Cloud Speech API. + * gRPC callable factory implementation for Cloud Speech-to-Text API. * *

This class is for advanced usage. */ diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/GrpcSpeechStub.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/GrpcSpeechStub.java index 7838a98ff..f6ec786ef 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/GrpcSpeechStub.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/GrpcSpeechStub.java @@ -41,7 +41,7 @@ // AUTO-GENERATED DOCUMENTATION AND CLASS /** - * gRPC stub implementation for Cloud Speech API. + * gRPC stub implementation for Cloud Speech-to-Text API. * *

This class is for advanced usage and reflects the underlying API directly. */ diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStub.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStub.java index 2dd6572c3..26f5a6fec 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStub.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStub.java @@ -33,7 +33,7 @@ // AUTO-GENERATED DOCUMENTATION AND CLASS /** - * Base stub class for Cloud Speech API. + * Base stub class for Cloud Speech-to-Text API. * *

This class is for advanced usage and reflects the underlying API directly. */ diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStubSettings.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStubSettings.java index 43f54b821..e825cc07b 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStubSettings.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStubSettings.java @@ -233,10 +233,10 @@ public static class Builder extends StubSettings.Builder * - * @param config *Required* Provides information to the recognizer that specifies how to - * process the request. - * @param audio *Required* The audio data to be recognized. + * @param config Required. Provides information to the recognizer that specifies how to process + * the request. + * @param audio Required. The audio data to be recognized. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final RecognizeResponse recognize(RecognitionConfig config, RecognitionAudio audio) { @@ -283,7 +283,8 @@ public final UnaryCallable recognizeCallabl /** * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations * interface. Returns either an `Operation.error` or an `Operation.response` which contains a - * `LongRunningRecognizeResponse` message. + * `LongRunningRecognizeResponse` message. For more information on asynchronous speech + * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * *

Sample code: * @@ -305,9 +306,9 @@ public final UnaryCallable recognizeCallabl * } * * - * @param config *Required* Provides information to the recognizer that specifies how to - * process the request. - * @param audio *Required* The audio data to be recognized. + * @param config Required. Provides information to the recognizer that specifies how to process + * the request. + * @param audio Required. The audio data to be recognized. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ @BetaApi( @@ -324,7 +325,8 @@ public final UnaryCallable recognizeCallabl /** * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations * interface. Returns either an `Operation.error` or an `Operation.response` which contains a - * `LongRunningRecognizeResponse` message. + * `LongRunningRecognizeResponse` message. For more information on asynchronous speech + * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * *

Sample code: * @@ -364,7 +366,8 @@ public final UnaryCallable recognizeCallabl /** * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations * interface. Returns either an `Operation.error` or an `Operation.response` which contains a - * `LongRunningRecognizeResponse` message. + * `LongRunningRecognizeResponse` message. For more information on asynchronous speech + * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * *

Sample code: * @@ -403,7 +406,8 @@ public final UnaryCallable recognizeCallabl /** * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations * interface. Returns either an `Operation.error` or an `Operation.response` which contains a - * `LongRunningRecognizeResponse` message. + * `LongRunningRecognizeResponse` message. For more information on asynchronous speech + * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * *

Sample code: * diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/package-info.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/package-info.java index b16debe8e..92eb1bcc3 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/package-info.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/package-info.java @@ -15,7 +15,7 @@ */ /** - * A client to Cloud Speech API. + * A client to Cloud Speech-to-Text API. * *

The interfaces provided are listed below, along with usage samples. * diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/GrpcSpeechCallableFactory.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/GrpcSpeechCallableFactory.java index 819322ad0..ad177d5d9 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/GrpcSpeechCallableFactory.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/GrpcSpeechCallableFactory.java @@ -36,7 +36,7 @@ // AUTO-GENERATED DOCUMENTATION AND CLASS /** - * gRPC callable factory implementation for Cloud Speech API. + * gRPC callable factory implementation for Cloud Speech-to-Text API. * *

This class is for advanced usage. */ diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/GrpcSpeechStub.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/GrpcSpeechStub.java index 89cd0deda..f74729b0a 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/GrpcSpeechStub.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/GrpcSpeechStub.java @@ -41,7 +41,7 @@ // AUTO-GENERATED DOCUMENTATION AND CLASS /** - * gRPC stub implementation for Cloud Speech API. + * gRPC stub implementation for Cloud Speech-to-Text API. * *

This class is for advanced usage and reflects the underlying API directly. */ diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStub.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStub.java index 9ac8a31b8..63670b6bc 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStub.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStub.java @@ -33,7 +33,7 @@ // AUTO-GENERATED DOCUMENTATION AND CLASS /** - * Base stub class for Cloud Speech API. + * Base stub class for Cloud Speech-to-Text API. * *

This class is for advanced usage and reflects the underlying API directly. */ diff --git a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStubSettings.java b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStubSettings.java index 6c83892ff..5d86db203 100644 --- a/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStubSettings.java +++ b/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStubSettings.java @@ -233,10 +233,10 @@ public static class Builder extends StubSettings.Builder */ public void longRunningRecognize( @@ -350,6 +352,8 @@ public void recognize( * google.longrunning.Operations interface. Returns either an * `Operation.error` or an `Operation.response` which contains * a `LongRunningRecognizeResponse` message. + * For more information on asynchronous speech recognition, see the + * [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * */ public void longRunningRecognize( @@ -422,6 +426,8 @@ public com.google.cloud.speech.v1.RecognizeResponse recognize( * google.longrunning.Operations interface. Returns either an * `Operation.error` or an `Operation.response` which contains * a `LongRunningRecognizeResponse` message. + * For more information on asynchronous speech recognition, see the + * [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * */ public com.google.longrunning.Operation longRunningRecognize( @@ -475,6 +481,8 @@ protected SpeechFutureStub build(io.grpc.Channel channel, io.grpc.CallOptions ca * google.longrunning.Operations interface. Returns either an * `Operation.error` or an `Operation.response` which contains * a `LongRunningRecognizeResponse` message. + * For more information on asynchronous speech recognition, see the + * [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). 
* */ public com.google.common.util.concurrent.ListenableFuture diff --git a/grpc-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechGrpc.java b/grpc-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechGrpc.java index bc04a7be0..cc71caee7 100644 --- a/grpc-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechGrpc.java +++ b/grpc-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechGrpc.java @@ -265,6 +265,8 @@ public void recognize( * google.longrunning.Operations interface. Returns either an * `Operation.error` or an `Operation.response` which contains * a `LongRunningRecognizeResponse` message. + * For more information on asynchronous speech recognition, see the + * [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * */ public void longRunningRecognize( @@ -363,6 +365,8 @@ public void recognize( * google.longrunning.Operations interface. Returns either an * `Operation.error` or an `Operation.response` which contains * a `LongRunningRecognizeResponse` message. + * For more information on asynchronous speech recognition, see the + * [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * */ public void longRunningRecognize( @@ -436,6 +440,8 @@ public com.google.cloud.speech.v1p1beta1.RecognizeResponse recognize( * google.longrunning.Operations interface. Returns either an * `Operation.error` or an `Operation.response` which contains * a `LongRunningRecognizeResponse` message. + * For more information on asynchronous speech recognition, see the + * [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * */ public com.google.longrunning.Operation longRunningRecognize( @@ -489,6 +495,8 @@ protected SpeechFutureStub build(io.grpc.Channel channel, io.grpc.CallOptions ca * google.longrunning.Operations interface. 
Returns either an * `Operation.error` or an `Operation.response` which contains * a `LongRunningRecognizeResponse` message. + * For more information on asynchronous speech recognition, see the + * [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). * */ public com.google.common.util.concurrent.ListenableFuture diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeRequest.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeRequest.java index de537b24f..953046c07 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeRequest.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeRequest.java @@ -136,11 +136,13 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *

-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return config_ != null; @@ -149,11 +151,13 @@ public boolean hasConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { return config_ == null @@ -164,11 +168,13 @@ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder() { return getConfig(); @@ -180,10 +186,12 @@ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder( * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasAudio() { return audio_ != null; @@ -192,10 +200,12 @@ public boolean hasAudio() { * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionAudio getAudio() { return audio_ == null @@ -206,10 +216,12 @@ public com.google.cloud.speech.v1.RecognitionAudio getAudio() { * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionAudioOrBuilder getAudioOrBuilder() { return getAudio(); @@ -585,11 +597,13 @@ public Builder mergeFrom( * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return configBuilder_ != null || config_ != null; @@ -598,11 +612,13 @@ public boolean hasConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { if (configBuilder_ == null) { @@ -617,11 +633,13 @@ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -640,11 +658,13 @@ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig value) { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig.Builder builderForValue) { if (configBuilder_ == null) { @@ -660,11 +680,13 @@ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig.Builder bu * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder mergeConfig(com.google.cloud.speech.v1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -687,11 +709,13 @@ public Builder mergeConfig(com.google.cloud.speech.v1.RecognitionConfig value) { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder clearConfig() { if (configBuilder_ == null) { @@ -708,11 +732,13 @@ public Builder clearConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfig.Builder getConfigBuilder() { @@ -723,11 +749,13 @@ public com.google.cloud.speech.v1.RecognitionConfig.Builder getConfigBuilder() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder() { if (configBuilder_ != null) { @@ -742,11 +770,13 @@ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder( * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1.RecognitionConfig, @@ -775,10 +805,12 @@ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder( * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasAudio() { return audioBuilder_ != null || audio_ != null; @@ -787,10 +819,12 @@ public boolean hasAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionAudio getAudio() { if (audioBuilder_ == null) { @@ -805,10 +839,12 @@ public com.google.cloud.speech.v1.RecognitionAudio getAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setAudio(com.google.cloud.speech.v1.RecognitionAudio value) { if (audioBuilder_ == null) { @@ -827,10 +863,12 @@ public Builder setAudio(com.google.cloud.speech.v1.RecognitionAudio value) { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setAudio(com.google.cloud.speech.v1.RecognitionAudio.Builder builderForValue) { if (audioBuilder_ == null) { @@ -846,10 +884,12 @@ public Builder setAudio(com.google.cloud.speech.v1.RecognitionAudio.Builder buil * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder mergeAudio(com.google.cloud.speech.v1.RecognitionAudio value) { if (audioBuilder_ == null) { @@ -872,10 +912,12 @@ public Builder mergeAudio(com.google.cloud.speech.v1.RecognitionAudio value) { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder clearAudio() { if (audioBuilder_ == null) { @@ -892,10 +934,12 @@ public Builder clearAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionAudio.Builder getAudioBuilder() { @@ -906,10 +950,12 @@ public com.google.cloud.speech.v1.RecognitionAudio.Builder getAudioBuilder() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionAudioOrBuilder getAudioOrBuilder() { if (audioBuilder_ != null) { @@ -924,10 +970,12 @@ public com.google.cloud.speech.v1.RecognitionAudioOrBuilder getAudioOrBuilder() * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1.RecognitionAudio, diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeRequestOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeRequestOrBuilder.java index 32bb414fd..94d9ff7ff 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeRequestOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeRequestOrBuilder.java @@ -27,33 +27,39 @@ public interface LongRunningRecognizeRequestOrBuilder * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ boolean hasConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1.RecognitionConfig getConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder(); @@ -61,30 +67,36 @@ public interface LongRunningRecognizeRequestOrBuilder * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ boolean hasAudio(); /** * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1.RecognitionAudio getAudio(); /** * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1.RecognitionAudioOrBuilder getAudioOrBuilder(); } diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeResponse.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeResponse.java index a98f30955..71cfd530c 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeResponse.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeResponse.java @@ -125,7 +125,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -138,7 +138,7 @@ public java.util.List getRes * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -152,7 +152,7 @@ public java.util.List getRes * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -165,7 +165,7 @@ public int getResultsCount() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -178,7 +178,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult getResults(int index) * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -575,7 +575,7 @@ private void ensureResultsIsMutable() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -592,7 +592,7 @@ public java.util.List getRes * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -609,7 +609,7 @@ public int getResultsCount() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -626,7 +626,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult getResults(int index) * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -649,7 +649,7 @@ public Builder setResults(int index, com.google.cloud.speech.v1.SpeechRecognitio * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -670,7 +670,7 @@ public Builder setResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -693,7 +693,7 @@ public Builder addResults(com.google.cloud.speech.v1.SpeechRecognitionResult val * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -716,7 +716,7 @@ public Builder addResults(int index, com.google.cloud.speech.v1.SpeechRecognitio * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -737,7 +737,7 @@ public Builder addResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -758,7 +758,7 @@ public Builder addResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -779,7 +779,7 @@ public Builder addAllResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -799,7 +799,7 @@ public Builder clearResults() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -819,7 +819,7 @@ public Builder removeResults(int index) { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -832,7 +832,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult.Builder getResultsBuil * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -850,7 +850,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResultOrBuilder getResultsOrB * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -868,7 +868,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResultOrBuilder getResultsOrB * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -882,7 +882,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult.Builder addResultsBuil * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -897,7 +897,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult.Builder addResultsBuil * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeResponseOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeResponseOrBuilder.java index 55f48e826..7eb5b387c 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeResponseOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/LongRunningRecognizeResponseOrBuilder.java @@ -27,7 +27,7 @@ public interface LongRunningRecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -38,7 +38,7 @@ public interface LongRunningRecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -49,7 +49,7 @@ public interface LongRunningRecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -60,7 +60,7 @@ public interface LongRunningRecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -72,7 +72,7 @@ public interface LongRunningRecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionAudio.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionAudio.java index fd4bda5f6..1e5624573 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionAudio.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionAudio.java @@ -24,8 +24,8 @@ *
  * Contains audio data in the encoding specified in the `RecognitionConfig`.
  * Either `content` or `uri` must be supplied. Supplying both or neither
- * returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
- * See [content limits](/speech-to-text/quotas#content).
+ * returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
+ * [content limits](https://cloud.google.com/speech-to-text/quotas#content).
  * 
* * Protobuf type {@code google.cloud.speech.v1.RecognitionAudio} @@ -159,7 +159,7 @@ public AudioSourceCase getAudioSourceCase() { * *
    * The audio data bytes encoded as specified in
-   * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+   * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
    * pure binary representation, whereas JSON representations use base64.
    * 
* @@ -182,9 +182,8 @@ public com.google.protobuf.ByteString getContent() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * * * string uri = 2; @@ -214,9 +213,8 @@ public java.lang.String getUri() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * * * string uri = 2; @@ -431,8 +429,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build *
    * Contains audio data in the encoding specified in the `RecognitionConfig`.
    * Either `content` or `uri` must be supplied. Supplying both or neither
-   * returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
-   * See [content limits](/speech-to-text/quotas#content).
+   * returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
+   * [content limits](https://cloud.google.com/speech-to-text/quotas#content).
    * 
* * Protobuf type {@code google.cloud.speech.v1.RecognitionAudio} @@ -624,7 +622,7 @@ public Builder clearAudioSource() { * *
      * The audio data bytes encoded as specified in
-     * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+     * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
      * pure binary representation, whereas JSON representations use base64.
      * 
* @@ -641,7 +639,7 @@ public com.google.protobuf.ByteString getContent() { * *
      * The audio data bytes encoded as specified in
-     * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+     * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
      * pure binary representation, whereas JSON representations use base64.
      * 
* @@ -661,7 +659,7 @@ public Builder setContent(com.google.protobuf.ByteString value) { * *
      * The audio data bytes encoded as specified in
-     * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+     * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
      * pure binary representation, whereas JSON representations use base64.
      * 
* @@ -685,9 +683,8 @@ public Builder clearContent() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * * * string uri = 2; @@ -717,9 +714,8 @@ public java.lang.String getUri() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * * * string uri = 2; @@ -749,9 +745,8 @@ public com.google.protobuf.ByteString getUriBytes() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). 
* * * string uri = 2; @@ -774,9 +769,8 @@ public Builder setUri(java.lang.String value) { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * * * string uri = 2; @@ -798,9 +792,8 @@ public Builder clearUri() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * * * string uri = 2; diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionAudioOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionAudioOrBuilder.java index de9f823ef..fe99e29d3 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionAudioOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionAudioOrBuilder.java @@ -28,7 +28,7 @@ public interface RecognitionAudioOrBuilder * *
    * The audio data bytes encoded as specified in
-   * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+   * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
    * pure binary representation, whereas JSON representations use base64.
    * 
* @@ -45,9 +45,8 @@ public interface RecognitionAudioOrBuilder * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * * * string uri = 2; @@ -62,9 +61,8 @@ public interface RecognitionAudioOrBuilder * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * * * string uri = 2; diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfig.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfig.java index 2c5754303..9f88b373d 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfig.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfig.java @@ -217,12 +217,14 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * The encoding of the audio data sent in the request.
-   * All encodings support only 1 channel (mono) audio.
+   * All encodings support only 1 channel (mono) audio, unless the
+   * `audio_channel_count` and `enable_separate_recognition_per_channel` fields
+   * are set.
    * For best results, the audio source should be captured and transmitted using
    * a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
    * recognition can be reduced if lossy codecs are used to capture or transmit
    * audio, particularly if background noise is present. Lossy codecs include
-   * `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`.
+   * `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`.
    * The `FLAC` and `WAV` audio file formats include a header that describes the
    * included audio content. You can request recognition for `WAV` files that
    * contain either `LINEAR16` or `MULAW` encoded audio.
@@ -232,8 +234,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
   * an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
    * encoding configuration must match the encoding described in the audio
    * header; otherwise the request returns an
-   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
-   * code.
+   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code.
    * 
* * Protobuf enum {@code google.cloud.speech.v1.RecognitionConfig.AudioEncoding} @@ -531,8 +532,7 @@ private AudioEncoding(int value) { *
    * Encoding of audio data sent in all `RecognitionAudio` messages.
    * This field is optional for `FLAC` and `WAV` audio files and required
-   * for all other audio formats. For details, see
-   * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
+   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
    * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; @@ -546,8 +546,7 @@ public int getEncodingValue() { *
    * Encoding of audio data sent in all `RecognitionAudio` messages.
    * This field is optional for `FLAC` and `WAV` audio files and required
-   * for all other audio formats. For details, see
-   * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
+   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
    * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; @@ -572,9 +571,8 @@ public com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding getEncoding() * 16000 is optimal. For best results, set the sampling rate of the audio * source to 16000 Hz. If that's not possible, use the native sample rate of * the audio source (instead of re-sampling). - * This field is optional for `FLAC` and `WAV` audio files and required - * for all other audio formats. For details, see - * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. + * This field is optional for FLAC and WAV audio files, but is + * required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. * * * int32 sample_rate_hertz = 2; @@ -589,7 +587,7 @@ public int getSampleRateHertz() { * * *
-   * *Optional* The number of channels in the input audio data.
+   * The number of channels in the input audio data.
    * ONLY set this for MULTI-CHANNEL recognition.
    * Valid values for LINEAR16 and FLAC are `1`-`8`.
    * Valid values for OGG_OPUS are '1'-'254'.
@@ -632,14 +630,15 @@ public boolean getEnableSeparateRecognitionPerChannel() {
    *
    *
    * 
-   * *Required* The language of the supplied audio as a
+   * Required. The language of the supplied audio as a
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    * Example: "en-US".
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes.
    * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; @@ -656,14 +655,15 @@ public java.lang.String getLanguageCode() { * * *
-   * *Required* The language of the supplied audio as a
+   * Required. The language of the supplied audio as a
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    * Example: "en-US".
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes.
    * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; @@ -683,7 +683,7 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() { * * *
-   * *Optional* Maximum number of recognition hypotheses to be returned.
+   * Maximum number of recognition hypotheses to be returned.
    * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
    * within each `SpeechRecognitionResult`.
    * The server may return fewer than `max_alternatives`.
@@ -703,7 +703,7 @@ public int getMaxAlternatives() {
    *
    *
    * 
-   * *Optional* If set to `true`, the server will attempt to filter out
+   * If set to `true`, the server will attempt to filter out
    * profanities, replacing all but the initial character in each filtered word
    * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
    * won't be filtered out.
@@ -721,9 +721,11 @@ public boolean getProfanityFilter() {
    *
    *
    * 
-   * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
    * A means to provide context to assist the speech recognition. For more
-   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -735,9 +737,11 @@ public java.util.List getSpeechContext * * *
-   * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
    * A means to provide context to assist the speech recognition. For more
-   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -750,9 +754,11 @@ public java.util.List getSpeechContext * * *
-   * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
    * A means to provide context to assist the speech recognition. For more
-   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -764,9 +770,11 @@ public int getSpeechContextsCount() { * * *
-   * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
    * A means to provide context to assist the speech recognition. For more
-   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -778,9 +786,11 @@ public com.google.cloud.speech.v1.SpeechContext getSpeechContexts(int index) { * * *
-   * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
    * A means to provide context to assist the speech recognition. For more
-   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -795,7 +805,7 @@ public com.google.cloud.speech.v1.SpeechContextOrBuilder getSpeechContextsOrBuil * * *
-   * *Optional* If `true`, the top result includes a list of words and
+   * If `true`, the top result includes a list of words and
    * the start and end time offsets (timestamps) for those words. If
    * `false`, no word-level time offset information is returned. The default is
    * `false`.
@@ -813,7 +823,7 @@ public boolean getEnableWordTimeOffsets() {
    *
    *
    * 
-   * *Optional* If 'true', adds punctuation to recognition result hypotheses.
+   * If 'true', adds punctuation to recognition result hypotheses.
    * This feature is only available in select languages. Setting this for
    * requests in other languages has no effect at all.
    * The default 'false' value does not add punctuation to result hypotheses.
@@ -834,7 +844,7 @@ public boolean getEnableAutomaticPunctuation() {
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -853,7 +863,7 @@ public boolean hasDiarizationConfig() {
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -874,7 +884,7 @@ public com.google.cloud.speech.v1.SpeakerDiarizationConfig getDiarizationConfig(
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -897,7 +907,7 @@ public com.google.cloud.speech.v1.SpeakerDiarizationConfig getDiarizationConfig(
    *
    *
    * 
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -909,7 +919,7 @@ public boolean hasMetadata() { * * *
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -923,7 +933,7 @@ public com.google.cloud.speech.v1.RecognitionMetadata getMetadata() { * * *
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -938,7 +948,7 @@ public com.google.cloud.speech.v1.RecognitionMetadataOrBuilder getMetadataOrBuil * * *
-   * *Optional* Which model to select for the given request. Select the model
+   * Which model to select for the given request. Select the model
    * best suited to your domain to get best results. If a model is not
    * explicitly specified, then we auto-select a model based on the parameters
    * in the RecognitionConfig.
@@ -989,7 +999,7 @@ public java.lang.String getModel() {
    *
    *
    * 
-   * *Optional* Which model to select for the given request. Select the model
+   * Which model to select for the given request. Select the model
    * best suited to your domain to get best results. If a model is not
    * explicitly specified, then we auto-select a model based on the parameters
    * in the RecognitionConfig.
@@ -1043,7 +1053,7 @@ public com.google.protobuf.ByteString getModelBytes() {
    *
    *
    * 
-   * *Optional* Set to true to use an enhanced model for speech recognition.
+   * Set to true to use an enhanced model for speech recognition.
    * If `use_enhanced` is set to true and the `model` field is not set, then
    * an appropriate enhanced model is chosen if an enhanced model exists for
    * the audio.
@@ -1659,8 +1669,7 @@ public Builder mergeFrom(
      * 
      * Encoding of audio data sent in all `RecognitionAudio` messages.
      * This field is optional for `FLAC` and `WAV` audio files and required
-     * for all other audio formats. For details, see
-     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
+     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
      * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; @@ -1674,8 +1683,7 @@ public int getEncodingValue() { *
      * Encoding of audio data sent in all `RecognitionAudio` messages.
      * This field is optional for `FLAC` and `WAV` audio files and required
-     * for all other audio formats. For details, see
-     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
+     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
      * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; @@ -1691,8 +1699,7 @@ public Builder setEncodingValue(int value) { *
      * Encoding of audio data sent in all `RecognitionAudio` messages.
      * This field is optional for `FLAC` and `WAV` audio files and required
-     * for all other audio formats. For details, see
-     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
+     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
      * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; @@ -1711,8 +1718,7 @@ public com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding getEncoding() *
      * Encoding of audio data sent in all `RecognitionAudio` messages.
      * This field is optional for `FLAC` and `WAV` audio files and required
-     * for all other audio formats. For details, see
-     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
+     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
      * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; @@ -1732,8 +1738,7 @@ public Builder setEncoding(com.google.cloud.speech.v1.RecognitionConfig.AudioEnc *
      * Encoding of audio data sent in all `RecognitionAudio` messages.
      * This field is optional for `FLAC` and `WAV` audio files and required
-     * for all other audio formats. For details, see
-     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
+     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
      * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; @@ -1755,9 +1760,8 @@ public Builder clearEncoding() { * 16000 is optimal. For best results, set the sampling rate of the audio * source to 16000 Hz. If that's not possible, use the native sample rate of * the audio source (instead of re-sampling). - * This field is optional for `FLAC` and `WAV` audio files and required - * for all other audio formats. For details, see - * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. + * This field is optional for FLAC and WAV audio files, but is + * required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. *
* * int32 sample_rate_hertz = 2; @@ -1774,9 +1778,8 @@ public int getSampleRateHertz() { * 16000 is optimal. For best results, set the sampling rate of the audio * source to 16000 Hz. If that's not possible, use the native sample rate of * the audio source (instead of re-sampling). - * This field is optional for `FLAC` and `WAV` audio files and required - * for all other audio formats. For details, see - * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. + * This field is optional for FLAC and WAV audio files, but is + * required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. *
* * int32 sample_rate_hertz = 2; @@ -1796,9 +1799,8 @@ public Builder setSampleRateHertz(int value) { * 16000 is optimal. For best results, set the sampling rate of the audio * source to 16000 Hz. If that's not possible, use the native sample rate of * the audio source (instead of re-sampling). - * This field is optional for `FLAC` and `WAV` audio files and required - * for all other audio formats. For details, see - * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. + * This field is optional for FLAC and WAV audio files, but is + * required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. *
* * int32 sample_rate_hertz = 2; @@ -1815,7 +1817,7 @@ public Builder clearSampleRateHertz() { * * *
-     * *Optional* The number of channels in the input audio data.
+     * The number of channels in the input audio data.
      * ONLY set this for MULTI-CHANNEL recognition.
      * Valid values for LINEAR16 and FLAC are `1`-`8`.
      * Valid values for OGG_OPUS are '1'-'254'.
@@ -1835,7 +1837,7 @@ public int getAudioChannelCount() {
      *
      *
      * 
-     * *Optional* The number of channels in the input audio data.
+     * The number of channels in the input audio data.
      * ONLY set this for MULTI-CHANNEL recognition.
      * Valid values for LINEAR16 and FLAC are `1`-`8`.
      * Valid values for OGG_OPUS are '1'-'254'.
@@ -1858,7 +1860,7 @@ public Builder setAudioChannelCount(int value) {
      *
      *
      * 
-     * *Optional* The number of channels in the input audio data.
+     * The number of channels in the input audio data.
      * ONLY set this for MULTI-CHANNEL recognition.
      * Valid values for LINEAR16 and FLAC are `1`-`8`.
      * Valid values for OGG_OPUS are '1'-'254'.
@@ -1942,14 +1944,15 @@ public Builder clearEnableSeparateRecognitionPerChannel() {
      *
      *
      * 
-     * *Required* The language of the supplied audio as a
+     * Required. The language of the supplied audio as a
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      * Example: "en-US".
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes.
      * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; @@ -1966,14 +1969,15 @@ public java.lang.String getLanguageCode() { * * *
-     * *Required* The language of the supplied audio as a
+     * Required. The language of the supplied audio as a
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      * Example: "en-US".
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes.
      * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; @@ -1990,14 +1994,15 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() { * * *
-     * *Required* The language of the supplied audio as a
+     * Required. The language of the supplied audio as a
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      * Example: "en-US".
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes.
      * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public Builder setLanguageCode(java.lang.String value) { if (value == null) { @@ -2012,14 +2017,15 @@ public Builder setLanguageCode(java.lang.String value) { * * *
-     * *Required* The language of the supplied audio as a
+     * Required. The language of the supplied audio as a
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      * Example: "en-US".
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes.
      * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public Builder clearLanguageCode() { @@ -2031,14 +2037,15 @@ public Builder clearLanguageCode() { * * *
-     * *Required* The language of the supplied audio as a
+     * Required. The language of the supplied audio as a
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      * Example: "en-US".
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes.
      * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -2056,7 +2063,7 @@ public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) { * * *
-     * *Optional* Maximum number of recognition hypotheses to be returned.
+     * Maximum number of recognition hypotheses to be returned.
      * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
      * within each `SpeechRecognitionResult`.
      * The server may return fewer than `max_alternatives`.
@@ -2073,7 +2080,7 @@ public int getMaxAlternatives() {
      *
      *
      * 
-     * *Optional* Maximum number of recognition hypotheses to be returned.
+     * Maximum number of recognition hypotheses to be returned.
      * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
      * within each `SpeechRecognitionResult`.
      * The server may return fewer than `max_alternatives`.
@@ -2093,7 +2100,7 @@ public Builder setMaxAlternatives(int value) {
      *
      *
      * 
-     * *Optional* Maximum number of recognition hypotheses to be returned.
+     * Maximum number of recognition hypotheses to be returned.
      * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
      * within each `SpeechRecognitionResult`.
      * The server may return fewer than `max_alternatives`.
@@ -2115,7 +2122,7 @@ public Builder clearMaxAlternatives() {
      *
      *
      * 
-     * *Optional* If set to `true`, the server will attempt to filter out
+     * If set to `true`, the server will attempt to filter out
      * profanities, replacing all but the initial character in each filtered word
      * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
      * won't be filtered out.
@@ -2130,7 +2137,7 @@ public boolean getProfanityFilter() {
      *
      *
      * 
-     * *Optional* If set to `true`, the server will attempt to filter out
+     * If set to `true`, the server will attempt to filter out
      * profanities, replacing all but the initial character in each filtered word
      * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
      * won't be filtered out.
@@ -2148,7 +2155,7 @@ public Builder setProfanityFilter(boolean value) {
      *
      *
      * 
-     * *Optional* If set to `true`, the server will attempt to filter out
+     * If set to `true`, the server will attempt to filter out
      * profanities, replacing all but the initial character in each filtered word
      * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
      * won't be filtered out.
@@ -2184,9 +2191,11 @@ private void ensureSpeechContextsIsMutable() {
      *
      *
      * 
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2202,9 +2211,11 @@ public java.util.List getSpeechContext * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2220,9 +2231,11 @@ public int getSpeechContextsCount() { * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2238,9 +2251,11 @@ public com.google.cloud.speech.v1.SpeechContext getSpeechContexts(int index) { * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2262,9 +2277,11 @@ public Builder setSpeechContexts(int index, com.google.cloud.speech.v1.SpeechCon * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2284,9 +2301,11 @@ public Builder setSpeechContexts( * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2308,9 +2327,11 @@ public Builder addSpeechContexts(com.google.cloud.speech.v1.SpeechContext value) * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2332,9 +2353,11 @@ public Builder addSpeechContexts(int index, com.google.cloud.speech.v1.SpeechCon * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2354,9 +2377,11 @@ public Builder addSpeechContexts( * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2376,9 +2401,11 @@ public Builder addSpeechContexts( * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2398,9 +2425,11 @@ public Builder addAllSpeechContexts( * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2419,9 +2448,11 @@ public Builder clearSpeechContexts() { * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2440,9 +2471,11 @@ public Builder removeSpeechContexts(int index) { * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2454,9 +2487,11 @@ public com.google.cloud.speech.v1.SpeechContext.Builder getSpeechContextsBuilder * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2472,9 +2507,11 @@ public com.google.cloud.speech.v1.SpeechContextOrBuilder getSpeechContextsOrBuil * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2491,9 +2528,11 @@ public com.google.cloud.speech.v1.SpeechContextOrBuilder getSpeechContextsOrBuil * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2506,9 +2545,11 @@ public com.google.cloud.speech.v1.SpeechContext.Builder addSpeechContextsBuilder * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2521,9 +2562,11 @@ public com.google.cloud.speech.v1.SpeechContext.Builder addSpeechContextsBuilder * * *
-     * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
      * A means to provide context to assist the speech recognition. For more
-     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -2558,7 +2601,7 @@ public com.google.cloud.speech.v1.SpeechContext.Builder addSpeechContextsBuilder * * *
-     * *Optional* If `true`, the top result includes a list of words and
+     * If `true`, the top result includes a list of words and
      * the start and end time offsets (timestamps) for those words. If
      * `false`, no word-level time offset information is returned. The default is
      * `false`.
@@ -2573,7 +2616,7 @@ public boolean getEnableWordTimeOffsets() {
      *
      *
      * 
-     * *Optional* If `true`, the top result includes a list of words and
+     * If `true`, the top result includes a list of words and
      * the start and end time offsets (timestamps) for those words. If
      * `false`, no word-level time offset information is returned. The default is
      * `false`.
@@ -2591,7 +2634,7 @@ public Builder setEnableWordTimeOffsets(boolean value) {
      *
      *
      * 
-     * *Optional* If `true`, the top result includes a list of words and
+     * If `true`, the top result includes a list of words and
      * the start and end time offsets (timestamps) for those words. If
      * `false`, no word-level time offset information is returned. The default is
      * `false`.
@@ -2611,7 +2654,7 @@ public Builder clearEnableWordTimeOffsets() {
      *
      *
      * 
-     * *Optional* If 'true', adds punctuation to recognition result hypotheses.
+     * If 'true', adds punctuation to recognition result hypotheses.
      * This feature is only available in select languages. Setting this for
      * requests in other languages has no effect at all.
      * The default 'false' value does not add punctuation to result hypotheses.
@@ -2629,7 +2672,7 @@ public boolean getEnableAutomaticPunctuation() {
      *
      *
      * 
-     * *Optional* If 'true', adds punctuation to recognition result hypotheses.
+     * If 'true', adds punctuation to recognition result hypotheses.
      * This feature is only available in select languages. Setting this for
      * requests in other languages has no effect at all.
      * The default 'false' value does not add punctuation to result hypotheses.
@@ -2650,7 +2693,7 @@ public Builder setEnableAutomaticPunctuation(boolean value) {
      *
      *
      * 
-     * *Optional* If 'true', adds punctuation to recognition result hypotheses.
+     * If 'true', adds punctuation to recognition result hypotheses.
      * This feature is only available in select languages. Setting this for
      * requests in other languages has no effect at all.
      * The default 'false' value does not add punctuation to result hypotheses.
@@ -2678,7 +2721,7 @@ public Builder clearEnableAutomaticPunctuation() {
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -2697,7 +2740,7 @@ public boolean hasDiarizationConfig() {
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -2722,7 +2765,7 @@ public com.google.cloud.speech.v1.SpeakerDiarizationConfig getDiarizationConfig(
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -2751,7 +2794,7 @@ public Builder setDiarizationConfig(com.google.cloud.speech.v1.SpeakerDiarizatio
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -2778,7 +2821,7 @@ public Builder setDiarizationConfig(
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -2812,7 +2855,7 @@ public Builder mergeDiarizationConfig(
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -2839,7 +2882,7 @@ public Builder clearDiarizationConfig() {
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -2861,7 +2904,7 @@ public Builder clearDiarizationConfig() {
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -2887,7 +2930,7 @@ public Builder clearDiarizationConfig() {
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -2926,7 +2969,7 @@ public Builder clearDiarizationConfig() {
      *
      *
      * 
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -2938,7 +2981,7 @@ public boolean hasMetadata() { * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -2956,7 +2999,7 @@ public com.google.cloud.speech.v1.RecognitionMetadata getMetadata() { * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -2978,7 +3021,7 @@ public Builder setMetadata(com.google.cloud.speech.v1.RecognitionMetadata value) * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -2998,7 +3041,7 @@ public Builder setMetadata( * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -3024,7 +3067,7 @@ public Builder mergeMetadata(com.google.cloud.speech.v1.RecognitionMetadata valu * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -3044,7 +3087,7 @@ public Builder clearMetadata() { * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -3058,7 +3101,7 @@ public com.google.cloud.speech.v1.RecognitionMetadata.Builder getMetadataBuilder * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -3076,7 +3119,7 @@ public com.google.cloud.speech.v1.RecognitionMetadataOrBuilder getMetadataOrBuil * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -3103,7 +3146,7 @@ public com.google.cloud.speech.v1.RecognitionMetadataOrBuilder getMetadataOrBuil * * *
-     * *Optional* Which model to select for the given request. Select the model
+     * Which model to select for the given request. Select the model
      * best suited to your domain to get best results. If a model is not
      * explicitly specified, then we auto-select a model based on the parameters
      * in the RecognitionConfig.
@@ -3154,7 +3197,7 @@ public java.lang.String getModel() {
      *
      *
      * 
-     * *Optional* Which model to select for the given request. Select the model
+     * Which model to select for the given request. Select the model
      * best suited to your domain to get best results. If a model is not
      * explicitly specified, then we auto-select a model based on the parameters
      * in the RecognitionConfig.
@@ -3205,7 +3248,7 @@ public com.google.protobuf.ByteString getModelBytes() {
      *
      *
      * 
-     * *Optional* Which model to select for the given request. Select the model
+     * Which model to select for the given request. Select the model
      * best suited to your domain to get best results. If a model is not
      * explicitly specified, then we auto-select a model based on the parameters
      * in the RecognitionConfig.
@@ -3254,7 +3297,7 @@ public Builder setModel(java.lang.String value) {
      *
      *
      * 
-     * *Optional* Which model to select for the given request. Select the model
+     * Which model to select for the given request. Select the model
      * best suited to your domain to get best results. If a model is not
      * explicitly specified, then we auto-select a model based on the parameters
      * in the RecognitionConfig.
@@ -3300,7 +3343,7 @@ public Builder clearModel() {
      *
      *
      * 
-     * *Optional* Which model to select for the given request. Select the model
+     * Which model to select for the given request. Select the model
      * best suited to your domain to get best results. If a model is not
      * explicitly specified, then we auto-select a model based on the parameters
      * in the RecognitionConfig.
@@ -3352,7 +3395,7 @@ public Builder setModelBytes(com.google.protobuf.ByteString value) {
      *
      *
      * 
-     * *Optional* Set to true to use an enhanced model for speech recognition.
+     * Set to true to use an enhanced model for speech recognition.
      * If `use_enhanced` is set to true and the `model` field is not set, then
      * an appropriate enhanced model is chosen if an enhanced model exists for
      * the audio.
@@ -3370,7 +3413,7 @@ public boolean getUseEnhanced() {
      *
      *
      * 
-     * *Optional* Set to true to use an enhanced model for speech recognition.
+     * Set to true to use an enhanced model for speech recognition.
      * If `use_enhanced` is set to true and the `model` field is not set, then
      * an appropriate enhanced model is chosen if an enhanced model exists for
      * the audio.
@@ -3391,7 +3434,7 @@ public Builder setUseEnhanced(boolean value) {
      *
      *
      * 
-     * *Optional* Set to true to use an enhanced model for speech recognition.
+     * Set to true to use an enhanced model for speech recognition.
      * If `use_enhanced` is set to true and the `model` field is not set, then
      * an appropriate enhanced model is chosen if an enhanced model exists for
      * the audio.
diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfigOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfigOrBuilder.java
index f37979048..b003cce86 100644
--- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfigOrBuilder.java
+++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfigOrBuilder.java
@@ -29,8 +29,7 @@ public interface RecognitionConfigOrBuilder
    * 
    * Encoding of audio data sent in all `RecognitionAudio` messages.
    * This field is optional for `FLAC` and `WAV` audio files and required
-   * for all other audio formats. For details, see
-   * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
+   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
    * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; @@ -42,8 +41,7 @@ public interface RecognitionConfigOrBuilder *
    * Encoding of audio data sent in all `RecognitionAudio` messages.
    * This field is optional for `FLAC` and `WAV` audio files and required
-   * for all other audio formats. For details, see
-   * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
+   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
    * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; @@ -59,9 +57,8 @@ public interface RecognitionConfigOrBuilder * 16000 is optimal. For best results, set the sampling rate of the audio * source to 16000 Hz. If that's not possible, use the native sample rate of * the audio source (instead of re-sampling). - * This field is optional for `FLAC` and `WAV` audio files and required - * for all other audio formats. For details, see - * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. + * This field is optional for FLAC and WAV audio files, but is + * required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. *
* * int32 sample_rate_hertz = 2; @@ -72,7 +69,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* The number of channels in the input audio data.
+   * The number of channels in the input audio data.
    * ONLY set this for MULTI-CHANNEL recognition.
    * Valid values for LINEAR16 and FLAC are `1`-`8`.
    * Valid values for OGG_OPUS are '1'-'254'.
@@ -107,28 +104,30 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Required* The language of the supplied audio as a
+   * Required. The language of the supplied audio as a
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    * Example: "en-US".
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes.
    * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ java.lang.String getLanguageCode(); /** * * *
-   * *Required* The language of the supplied audio as a
+   * Required. The language of the supplied audio as a
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    * Example: "en-US".
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes.
    * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ com.google.protobuf.ByteString getLanguageCodeBytes(); @@ -136,7 +135,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* Maximum number of recognition hypotheses to be returned.
+   * Maximum number of recognition hypotheses to be returned.
    * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
    * within each `SpeechRecognitionResult`.
    * The server may return fewer than `max_alternatives`.
@@ -152,7 +151,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* If set to `true`, the server will attempt to filter out
+   * If set to `true`, the server will attempt to filter out
    * profanities, replacing all but the initial character in each filtered word
    * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
    * won't be filtered out.
@@ -166,9 +165,11 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
    * A means to provide context to assist the speech recognition. For more
-   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -178,9 +179,11 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
    * A means to provide context to assist the speech recognition. For more
-   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -190,9 +193,11 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
    * A means to provide context to assist the speech recognition. For more
-   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -202,9 +207,11 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
    * A means to provide context to assist the speech recognition. For more
-   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -215,9 +222,11 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
+   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
    * A means to provide context to assist the speech recognition. For more
-   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; @@ -228,7 +237,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* If `true`, the top result includes a list of words and
+   * If `true`, the top result includes a list of words and
    * the start and end time offsets (timestamps) for those words. If
    * `false`, no word-level time offset information is returned. The default is
    * `false`.
@@ -242,7 +251,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* If 'true', adds punctuation to recognition result hypotheses.
+   * If 'true', adds punctuation to recognition result hypotheses.
    * This feature is only available in select languages. Setting this for
    * requests in other languages has no effect at all.
    * The default 'false' value does not add punctuation to result hypotheses.
@@ -259,7 +268,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -276,7 +285,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -293,7 +302,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -311,7 +320,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -321,7 +330,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -331,7 +340,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; @@ -342,7 +351,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* Which model to select for the given request. Select the model
+   * Which model to select for the given request. Select the model
    * best suited to your domain to get best results. If a model is not
    * explicitly specified, then we auto-select a model based on the parameters
    * in the RecognitionConfig.
@@ -383,7 +392,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Which model to select for the given request. Select the model
+   * Which model to select for the given request. Select the model
    * best suited to your domain to get best results. If a model is not
    * explicitly specified, then we auto-select a model based on the parameters
    * in the RecognitionConfig.
@@ -425,7 +434,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Set to true to use an enhanced model for speech recognition.
+   * Set to true to use an enhanced model for speech recognition.
    * If `use_enhanced` is set to true and the `model` field is not set, then
    * an appropriate enhanced model is chosen if an enhanced model exists for
    * the audio.
diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeRequest.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeRequest.java
index 10826d787..1bedfff3a 100644
--- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeRequest.java
+++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeRequest.java
@@ -135,11 +135,13 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return config_ != null; @@ -148,11 +150,13 @@ public boolean hasConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { return config_ == null @@ -163,11 +167,13 @@ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder() { return getConfig(); @@ -179,10 +185,12 @@ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder( * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasAudio() { return audio_ != null; @@ -191,10 +199,12 @@ public boolean hasAudio() { * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionAudio getAudio() { return audio_ == null @@ -205,10 +215,12 @@ public com.google.cloud.speech.v1.RecognitionAudio getAudio() { * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionAudioOrBuilder getAudioOrBuilder() { return getAudio(); @@ -580,11 +592,13 @@ public Builder mergeFrom( * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return configBuilder_ != null || config_ != null; @@ -593,11 +607,13 @@ public boolean hasConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { if (configBuilder_ == null) { @@ -612,11 +628,13 @@ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -635,11 +653,13 @@ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig value) { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig.Builder builderForValue) { if (configBuilder_ == null) { @@ -655,11 +675,13 @@ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig.Builder bu * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder mergeConfig(com.google.cloud.speech.v1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -682,11 +704,13 @@ public Builder mergeConfig(com.google.cloud.speech.v1.RecognitionConfig value) { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder clearConfig() { if (configBuilder_ == null) { @@ -703,11 +727,13 @@ public Builder clearConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfig.Builder getConfigBuilder() { @@ -718,11 +744,13 @@ public com.google.cloud.speech.v1.RecognitionConfig.Builder getConfigBuilder() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder() { if (configBuilder_ != null) { @@ -737,11 +765,13 @@ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder( * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1.RecognitionConfig, @@ -770,10 +800,12 @@ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder( * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasAudio() { return audioBuilder_ != null || audio_ != null; @@ -782,10 +814,12 @@ public boolean hasAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionAudio getAudio() { if (audioBuilder_ == null) { @@ -800,10 +834,12 @@ public com.google.cloud.speech.v1.RecognitionAudio getAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setAudio(com.google.cloud.speech.v1.RecognitionAudio value) { if (audioBuilder_ == null) { @@ -822,10 +858,12 @@ public Builder setAudio(com.google.cloud.speech.v1.RecognitionAudio value) { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setAudio(com.google.cloud.speech.v1.RecognitionAudio.Builder builderForValue) { if (audioBuilder_ == null) { @@ -841,10 +879,12 @@ public Builder setAudio(com.google.cloud.speech.v1.RecognitionAudio.Builder buil * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder mergeAudio(com.google.cloud.speech.v1.RecognitionAudio value) { if (audioBuilder_ == null) { @@ -867,10 +907,12 @@ public Builder mergeAudio(com.google.cloud.speech.v1.RecognitionAudio value) { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder clearAudio() { if (audioBuilder_ == null) { @@ -887,10 +929,12 @@ public Builder clearAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionAudio.Builder getAudioBuilder() { @@ -901,10 +945,12 @@ public com.google.cloud.speech.v1.RecognitionAudio.Builder getAudioBuilder() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionAudioOrBuilder getAudioOrBuilder() { if (audioBuilder_ != null) { @@ -919,10 +965,12 @@ public com.google.cloud.speech.v1.RecognitionAudioOrBuilder getAudioOrBuilder() * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1.RecognitionAudio, diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeRequestOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeRequestOrBuilder.java index 1b26740d2..33608ae16 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeRequestOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeRequestOrBuilder.java @@ -27,33 +27,39 @@ public interface RecognizeRequestOrBuilder * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ boolean hasConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1.RecognitionConfig getConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder(); @@ -61,30 +67,36 @@ public interface RecognizeRequestOrBuilder * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ boolean hasAudio(); /** * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1.RecognitionAudio getAudio(); /** * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1.RecognitionAudioOrBuilder getAudioOrBuilder(); } diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeResponse.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeResponse.java index c35bc67d7..48046d87a 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeResponse.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeResponse.java @@ -123,7 +123,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -136,7 +136,7 @@ public java.util.List getRes * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -150,7 +150,7 @@ public java.util.List getRes * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -163,7 +163,7 @@ public int getResultsCount() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -176,7 +176,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult getResults(int index) * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -568,7 +568,7 @@ private void ensureResultsIsMutable() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -585,7 +585,7 @@ public java.util.List getRes * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -602,7 +602,7 @@ public int getResultsCount() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -619,7 +619,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult getResults(int index) * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -642,7 +642,7 @@ public Builder setResults(int index, com.google.cloud.speech.v1.SpeechRecognitio * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -663,7 +663,7 @@ public Builder setResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -686,7 +686,7 @@ public Builder addResults(com.google.cloud.speech.v1.SpeechRecognitionResult val * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -709,7 +709,7 @@ public Builder addResults(int index, com.google.cloud.speech.v1.SpeechRecognitio * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -730,7 +730,7 @@ public Builder addResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -751,7 +751,7 @@ public Builder addResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -772,7 +772,7 @@ public Builder addAllResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -792,7 +792,7 @@ public Builder clearResults() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -812,7 +812,7 @@ public Builder removeResults(int index) { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -825,7 +825,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult.Builder getResultsBuil * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -843,7 +843,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResultOrBuilder getResultsOrB * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -861,7 +861,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResultOrBuilder getResultsOrB * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -875,7 +875,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult.Builder addResultsBuil * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -890,7 +890,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult.Builder addResultsBuil * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeResponseOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeResponseOrBuilder.java index 345a88f41..7c0cadfb0 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeResponseOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognizeResponseOrBuilder.java @@ -27,7 +27,7 @@ public interface RecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -38,7 +38,7 @@ public interface RecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -49,7 +49,7 @@ public interface RecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -60,7 +60,7 @@ public interface RecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -72,7 +72,7 @@ public interface RecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeakerDiarizationConfig.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeakerDiarizationConfig.java index 40c590943..33504da02 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeakerDiarizationConfig.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeakerDiarizationConfig.java @@ -22,7 +22,7 @@ * * *
- * *Optional* Config to enable speaker diarization.
+ * Config to enable speaker diarization.
  * 
* * Protobuf type {@code google.cloud.speech.v1.SpeakerDiarizationConfig} @@ -123,7 +123,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * *Optional* If 'true', enables speaker detection for each recognized word in
+   * If 'true', enables speaker detection for each recognized word in
    * the top alternative of the recognition result using a speaker_tag provided
    * in the WordInfo.
    * 
@@ -140,7 +140,6 @@ public boolean getEnableSpeakerDiarization() { * * *
-   * *Optional*
    * Minimum number of speakers in the conversation. This range gives you more
    * flexibility by allowing the system to automatically determine the correct
    * number of speakers. If not set, the default value is 2.
@@ -158,7 +157,6 @@ public int getMinSpeakerCount() {
    *
    *
    * 
-   * *Optional*
    * Maximum number of speakers in the conversation. This range gives you more
    * flexibility by allowing the system to automatically determine the correct
    * number of speakers. If not set, the default value is 6.
@@ -176,14 +174,14 @@ public int getMaxSpeakerCount() {
    *
    *
    * 
-   * Output only. A distinct integer value is assigned for every speaker within
+   * A distinct integer value is assigned for every speaker within
    * the audio. This field specifies which one of those speakers was detected to
    * have spoken this word. Value ranges from '1' to diarization_speaker_count.
    * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
    * top alternative.
    * 
* - * int32 speaker_tag = 5; + * int32 speaker_tag = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public int getSpeakerTag() { return speakerTag_; @@ -379,7 +377,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * *Optional* Config to enable speaker diarization.
+   * Config to enable speaker diarization.
    * 
* * Protobuf type {@code google.cloud.speech.v1.SpeakerDiarizationConfig} @@ -556,7 +554,7 @@ public Builder mergeFrom( * * *
-     * *Optional* If 'true', enables speaker detection for each recognized word in
+     * If 'true', enables speaker detection for each recognized word in
      * the top alternative of the recognition result using a speaker_tag provided
      * in the WordInfo.
      * 
@@ -570,7 +568,7 @@ public boolean getEnableSpeakerDiarization() { * * *
-     * *Optional* If 'true', enables speaker detection for each recognized word in
+     * If 'true', enables speaker detection for each recognized word in
      * the top alternative of the recognition result using a speaker_tag provided
      * in the WordInfo.
      * 
@@ -587,7 +585,7 @@ public Builder setEnableSpeakerDiarization(boolean value) { * * *
-     * *Optional* If 'true', enables speaker detection for each recognized word in
+     * If 'true', enables speaker detection for each recognized word in
      * the top alternative of the recognition result using a speaker_tag provided
      * in the WordInfo.
      * 
@@ -606,7 +604,6 @@ public Builder clearEnableSpeakerDiarization() { * * *
-     * *Optional*
      * Minimum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 2.
@@ -621,7 +618,6 @@ public int getMinSpeakerCount() {
      *
      *
      * 
-     * *Optional*
      * Minimum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 2.
@@ -639,7 +635,6 @@ public Builder setMinSpeakerCount(int value) {
      *
      *
      * 
-     * *Optional*
      * Minimum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 2.
@@ -659,7 +654,6 @@ public Builder clearMinSpeakerCount() {
      *
      *
      * 
-     * *Optional*
      * Maximum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 6.
@@ -674,7 +668,6 @@ public int getMaxSpeakerCount() {
      *
      *
      * 
-     * *Optional*
      * Maximum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 6.
@@ -692,7 +685,6 @@ public Builder setMaxSpeakerCount(int value) {
      *
      *
      * 
-     * *Optional*
      * Maximum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 6.
@@ -712,14 +704,14 @@ public Builder clearMaxSpeakerCount() {
      *
      *
      * 
-     * Output only. A distinct integer value is assigned for every speaker within
+     * A distinct integer value is assigned for every speaker within
      * the audio. This field specifies which one of those speakers was detected to
      * have spoken this word. Value ranges from '1' to diarization_speaker_count.
      * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
      * top alternative.
      * 
* - * int32 speaker_tag = 5; + * int32 speaker_tag = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public int getSpeakerTag() { return speakerTag_; @@ -728,14 +720,14 @@ public int getSpeakerTag() { * * *
-     * Output only. A distinct integer value is assigned for every speaker within
+     * A distinct integer value is assigned for every speaker within
      * the audio. This field specifies which one of those speakers was detected to
      * have spoken this word. Value ranges from '1' to diarization_speaker_count.
      * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
      * top alternative.
      * 
* - * int32 speaker_tag = 5; + * int32 speaker_tag = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public Builder setSpeakerTag(int value) { @@ -747,14 +739,14 @@ public Builder setSpeakerTag(int value) { * * *
-     * Output only. A distinct integer value is assigned for every speaker within
+     * A distinct integer value is assigned for every speaker within
      * the audio. This field specifies which one of those speakers was detected to
      * have spoken this word. Value ranges from '1' to diarization_speaker_count.
      * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
      * top alternative.
      * 
* - * int32 speaker_tag = 5; + * int32 speaker_tag = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public Builder clearSpeakerTag() { diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeakerDiarizationConfigOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeakerDiarizationConfigOrBuilder.java index fa8546a80..4fabb7a0b 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeakerDiarizationConfigOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeakerDiarizationConfigOrBuilder.java @@ -27,7 +27,7 @@ public interface SpeakerDiarizationConfigOrBuilder * * *
-   * *Optional* If 'true', enables speaker detection for each recognized word in
+   * If 'true', enables speaker detection for each recognized word in
    * the top alternative of the recognition result using a speaker_tag provided
    * in the WordInfo.
    * 
@@ -40,7 +40,6 @@ public interface SpeakerDiarizationConfigOrBuilder * * *
-   * *Optional*
    * Minimum number of speakers in the conversation. This range gives you more
    * flexibility by allowing the system to automatically determine the correct
    * number of speakers. If not set, the default value is 2.
@@ -54,7 +53,6 @@ public interface SpeakerDiarizationConfigOrBuilder
    *
    *
    * 
-   * *Optional*
    * Maximum number of speakers in the conversation. This range gives you more
    * flexibility by allowing the system to automatically determine the correct
    * number of speakers. If not set, the default value is 6.
@@ -68,14 +66,14 @@ public interface SpeakerDiarizationConfigOrBuilder
    *
    *
    * 
-   * Output only. A distinct integer value is assigned for every speaker within
+   * A distinct integer value is assigned for every speaker within
    * the audio. This field specifies which one of those speakers was detected to
    * have spoken this word. Value ranges from '1' to diarization_speaker_count.
    * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
    * top alternative.
    * 
* - * int32 speaker_tag = 5; + * int32 speaker_tag = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ int getSpeakerTag(); } diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechContext.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechContext.java index 2dd53ac15..1a281a8ce 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechContext.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechContext.java @@ -119,12 +119,17 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+   * List items can also be set to classes for groups of words that represent
+   * common concepts that occur in natural language. For example, rather than
+   * providing phrase hints for every month of the year, using the $MONTH class
+   * improves the likelihood of correctly transcribing audio that includes
+   * months.
    * 
* * repeated string phrases = 1; @@ -136,12 +141,17 @@ public com.google.protobuf.ProtocolStringList getPhrasesList() { * * *
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+   * List items can also be set to classes for groups of words that represent
+   * common concepts that occur in natural language. For example, rather than
+   * providing phrase hints for every month of the year, using the $MONTH class
+   * improves the likelihood of correctly transcribing audio that includes
+   * months.
    * 
* * repeated string phrases = 1; @@ -153,12 +163,17 @@ public int getPhrasesCount() { * * *
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+   * List items can also be set to classes for groups of words that represent
+   * common concepts that occur in natural language. For example, rather than
+   * providing phrase hints for every month of the year, using the $MONTH class
+   * improves the likelihood of correctly transcribing audio that includes
+   * months.
    * 
* * repeated string phrases = 1; @@ -170,12 +185,17 @@ public java.lang.String getPhrases(int index) { * * *
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+   * List items can also be set to classes for groups of words that represent
+   * common concepts that occur in natural language. For example, rather than
+   * providing phrase hints for every month of the year, using the $MONTH class
+   * improves the likelihood of correctly transcribing audio that includes
+   * months.
    * 
* * repeated string phrases = 1; @@ -533,12 +553,17 @@ private void ensurePhrasesIsMutable() { * * *
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+     * List items can also be set to classes for groups of words that represent
+     * common concepts that occur in natural language. For example, rather than
+     * providing phrase hints for every month of the year, using the $MONTH class
+     * improves the likelihood of correctly transcribing audio that includes
+     * months.
      * 
* * repeated string phrases = 1; @@ -550,12 +575,17 @@ public com.google.protobuf.ProtocolStringList getPhrasesList() { * * *
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+     * List items can also be set to classes for groups of words that represent
+     * common concepts that occur in natural language. For example, rather than
+     * providing phrase hints for every month of the year, using the $MONTH class
+     * improves the likelihood of correctly transcribing audio that includes
+     * months.
      * 
* * repeated string phrases = 1; @@ -567,12 +597,17 @@ public int getPhrasesCount() { * * *
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+     * List items can also be set to classes for groups of words that represent
+     * common concepts that occur in natural language. For example, rather than
+     * providing phrase hints for every month of the year, using the $MONTH class
+     * improves the likelihood of correctly transcribing audio that includes
+     * months.
      * 
* * repeated string phrases = 1; @@ -584,12 +619,17 @@ public java.lang.String getPhrases(int index) { * * *
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+     * List items can also be set to classes for groups of words that represent
+     * common concepts that occur in natural language. For example, rather than
+     * providing phrase hints for every month of the year, using the $MONTH class
+     * improves the likelihood of correctly transcribing audio that includes
+     * months.
      * 
* * repeated string phrases = 1; @@ -601,12 +641,17 @@ public com.google.protobuf.ByteString getPhrasesBytes(int index) { * * *
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+     * List items can also be set to classes for groups of words that represent
+     * common concepts that occur in natural language. For example, rather than
+     * providing phrase hints for every month of the year, using the $MONTH class
+     * improves the likelihood of correctly transcribing audio that includes
+     * months.
      * 
* * repeated string phrases = 1; @@ -624,12 +669,17 @@ public Builder setPhrases(int index, java.lang.String value) { * * *
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+     * List items can also be set to classes for groups of words that represent
+     * common concepts that occur in natural language. For example, rather than
+     * providing phrase hints for every month of the year, using the $MONTH class
+     * improves the likelihood of correctly transcribing audio that includes
+     * months.
      * 
* * repeated string phrases = 1; @@ -647,12 +697,17 @@ public Builder addPhrases(java.lang.String value) { * * *
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+     * List items can also be set to classes for groups of words that represent
+     * common concepts that occur in natural language. For example, rather than
+     * providing phrase hints for every month of the year, using the $MONTH class
+     * improves the likelihood of correctly transcribing audio that includes
+     * months.
      * 
* * repeated string phrases = 1; @@ -667,12 +722,17 @@ public Builder addAllPhrases(java.lang.Iterable values) { * * *
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+     * List items can also be set to classes for groups of words that represent
+     * common concepts that occur in natural language. For example, rather than
+     * providing phrase hints for every month of the year, using the $MONTH class
+     * improves the likelihood of correctly transcribing audio that includes
+     * months.
      * 
* * repeated string phrases = 1; @@ -687,12 +747,17 @@ public Builder clearPhrases() { * * *
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+     * List items can also be set to classes for groups of words that represent
+     * common concepts that occur in natural language. For example, rather than
+     * providing phrase hints for every month of the year, using the $MONTH class
+     * improves the likelihood of correctly transcribing audio that includes
+     * months.
      * 
* * repeated string phrases = 1; diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechContextOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechContextOrBuilder.java index 9417f0aa8..f2307cbb9 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechContextOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechContextOrBuilder.java @@ -27,12 +27,17 @@ public interface SpeechContextOrBuilder * * *
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+   * List items can also be set to classes for groups of words that represent
+   * common concepts that occur in natural language. For example, rather than
+   * providing phrase hints for every month of the year, using the $MONTH class
+   * improves the likelihood of correctly transcribing audio that includes
+   * months.
    * 
* * repeated string phrases = 1; @@ -42,12 +47,17 @@ public interface SpeechContextOrBuilder * * *
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+   * List items can also be set to classes for groups of words that represent
+   * common concepts that occur in natural language. For example, rather than
+   * providing phrase hints for every month of the year, using the $MONTH class
+   * improves the likelihood of correctly transcribing audio that includes
+   * months.
    * 
* * repeated string phrases = 1; @@ -57,12 +67,17 @@ public interface SpeechContextOrBuilder * * *
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+   * List items can also be set to classes for groups of words that represent
+   * common concepts that occur in natural language. For example, rather than
+   * providing phrase hints for every month of the year, using the $MONTH class
+   * improves the likelihood of correctly transcribing audio that includes
+   * months.
    * 
* * repeated string phrases = 1; @@ -72,12 +87,17 @@ public interface SpeechContextOrBuilder * * *
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+   * List items can also be set to classes for groups of words that represent
+   * common concepts that occur in natural language. For example, rather than
+   * providing phrase hints for every month of the year, using the $MONTH class
+   * improves the likelihood of correctly transcribing audio that includes
+   * months.
    * 
* * repeated string phrases = 1; diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechProto.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechProto.java index 7a06b78f3..39323e613 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechProto.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechProto.java @@ -106,46 +106,49 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { java.lang.String[] descriptorData = { "\n)google/cloud/speech/v1/cloud_speech.pr" + "oto\022\026google.cloud.speech.v1\032\034google/api/" - + "annotations.proto\032#google/longrunning/op" - + "erations.proto\032\036google/protobuf/duration" - + ".proto\032\037google/protobuf/timestamp.proto\032" - + "\027google/rpc/status.proto\"\206\001\n\020RecognizeRe" - + "quest\0229\n\006config\030\001 \001(\0132).google.cloud.spe" - + "ech.v1.RecognitionConfig\0227\n\005audio\030\002 \001(\0132" - + "(.google.cloud.speech.v1.RecognitionAudi" - + "o\"\221\001\n\033LongRunningRecognizeRequest\0229\n\006con" - + "fig\030\001 \001(\0132).google.cloud.speech.v1.Recog" - + "nitionConfig\0227\n\005audio\030\002 \001(\0132(.google.clo" - + "ud.speech.v1.RecognitionAudio\"\231\001\n\031Stream" - + "ingRecognizeRequest\022N\n\020streaming_config\030" - + "\001 \001(\01322.google.cloud.speech.v1.Streaming" - + "RecognitionConfigH\000\022\027\n\raudio_content\030\002 \001" - + "(\014H\000B\023\n\021streaming_request\"\212\001\n\032StreamingR" - + "ecognitionConfig\0229\n\006config\030\001 \001(\0132).googl" - + "e.cloud.speech.v1.RecognitionConfig\022\030\n\020s" - + "ingle_utterance\030\002 \001(\010\022\027\n\017interim_results" - + "\030\003 \001(\010\"\332\005\n\021RecognitionConfig\022I\n\010encoding" - + "\030\001 \001(\01627.google.cloud.speech.v1.Recognit" - + "ionConfig.AudioEncoding\022\031\n\021sample_rate_h" - + "ertz\030\002 
\001(\005\022\033\n\023audio_channel_count\030\007 \001(\005\022" - + "/\n\'enable_separate_recognition_per_chann" - + "el\030\014 \001(\010\022\025\n\rlanguage_code\030\003 \001(\t\022\030\n\020max_a" - + "lternatives\030\004 \001(\005\022\030\n\020profanity_filter\030\005 " - + "\001(\010\022>\n\017speech_contexts\030\006 \003(\0132%.google.cl" - + "oud.speech.v1.SpeechContext\022 \n\030enable_wo" - + "rd_time_offsets\030\010 \001(\010\022$\n\034enable_automati" - + "c_punctuation\030\013 \001(\010\022L\n\022diarization_confi" - + "g\030\023 \001(\01320.google.cloud.speech.v1.Speaker" - + "DiarizationConfig\022=\n\010metadata\030\t \001(\0132+.go" - + "ogle.cloud.speech.v1.RecognitionMetadata" - + "\022\r\n\005model\030\r \001(\t\022\024\n\014use_enhanced\030\016 \001(\010\"\213\001" - + "\n\rAudioEncoding\022\030\n\024ENCODING_UNSPECIFIED\020" - + "\000\022\014\n\010LINEAR16\020\001\022\010\n\004FLAC\020\002\022\t\n\005MULAW\020\003\022\007\n\003" - + "AMR\020\004\022\n\n\006AMR_WB\020\005\022\014\n\010OGG_OPUS\020\006\022\032\n\026SPEEX" - + "_WITH_HEADER_BYTE\020\007\"\211\001\n\030SpeakerDiarizati" - + "onConfig\022\"\n\032enable_speaker_diarization\030\001" - + " \001(\010\022\031\n\021min_speaker_count\030\002 \001(\005\022\031\n\021max_s" - + "peaker_count\030\003 \001(\005\022\023\n\013speaker_tag\030\005 \001(\005\"" + + "annotations.proto\032\027google/api/client.pro" + + "to\032\037google/api/field_behavior.proto\032#goo" + + "gle/longrunning/operations.proto\032\031google" + + "/protobuf/any.proto\032\036google/protobuf/dur" + + "ation.proto\032\037google/protobuf/timestamp.p" + + "roto\032\027google/rpc/status.proto\"\220\001\n\020Recogn" + + "izeRequest\022>\n\006config\030\001 \001(\0132).google.clou" + + "d.speech.v1.RecognitionConfigB\003\340A\002\022<\n\005au" + + "dio\030\002 \001(\0132(.google.cloud.speech.v1.Recog" + + "nitionAudioB\003\340A\002\"\233\001\n\033LongRunningRecogniz" + + "eRequest\022>\n\006config\030\001 
\001(\0132).google.cloud." + + "speech.v1.RecognitionConfigB\003\340A\002\022<\n\005audi" + + "o\030\002 \001(\0132(.google.cloud.speech.v1.Recogni" + + "tionAudioB\003\340A\002\"\231\001\n\031StreamingRecognizeReq" + + "uest\022N\n\020streaming_config\030\001 \001(\01322.google." + + "cloud.speech.v1.StreamingRecognitionConf" + + "igH\000\022\027\n\raudio_content\030\002 \001(\014H\000B\023\n\021streami" + + "ng_request\"\217\001\n\032StreamingRecognitionConfi" + + "g\022>\n\006config\030\001 \001(\0132).google.cloud.speech." + + "v1.RecognitionConfigB\003\340A\002\022\030\n\020single_utte" + + "rance\030\002 \001(\010\022\027\n\017interim_results\030\003 \001(\010\"\337\005\n" + + "\021RecognitionConfig\022I\n\010encoding\030\001 \001(\01627.g" + + "oogle.cloud.speech.v1.RecognitionConfig." + + "AudioEncoding\022\031\n\021sample_rate_hertz\030\002 \001(\005" + + "\022\033\n\023audio_channel_count\030\007 \001(\005\022/\n\'enable_" + + "separate_recognition_per_channel\030\014 \001(\010\022\032" + + "\n\rlanguage_code\030\003 \001(\tB\003\340A\002\022\030\n\020max_altern" + + "atives\030\004 \001(\005\022\030\n\020profanity_filter\030\005 \001(\010\022>" + + "\n\017speech_contexts\030\006 \003(\0132%.google.cloud.s" + + "peech.v1.SpeechContext\022 \n\030enable_word_ti" + + "me_offsets\030\010 \001(\010\022$\n\034enable_automatic_pun" + + "ctuation\030\013 \001(\010\022L\n\022diarization_config\030\023 \001" + + "(\01320.google.cloud.speech.v1.SpeakerDiari" + + "zationConfig\022=\n\010metadata\030\t \001(\0132+.google." 
+ + "cloud.speech.v1.RecognitionMetadata\022\r\n\005m" + + "odel\030\r \001(\t\022\024\n\014use_enhanced\030\016 \001(\010\"\213\001\n\rAud" + + "ioEncoding\022\030\n\024ENCODING_UNSPECIFIED\020\000\022\014\n\010" + + "LINEAR16\020\001\022\010\n\004FLAC\020\002\022\t\n\005MULAW\020\003\022\007\n\003AMR\020\004" + + "\022\n\n\006AMR_WB\020\005\022\014\n\010OGG_OPUS\020\006\022\032\n\026SPEEX_WITH" + + "_HEADER_BYTE\020\007\"\216\001\n\030SpeakerDiarizationCon" + + "fig\022\"\n\032enable_speaker_diarization\030\001 \001(\010\022" + + "\031\n\021min_speaker_count\030\002 \001(\005\022\031\n\021max_speake" + + "r_count\030\003 \001(\005\022\030\n\013speaker_tag\030\005 \001(\005B\003\340A\003\"" + "\240\010\n\023RecognitionMetadata\022U\n\020interaction_t" + "ype\030\001 \001(\0162;.google.cloud.speech.v1.Recog" + "nitionMetadata.InteractionType\022$\n\034indust" @@ -191,36 +194,40 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "cloud.speech.v1.StreamingRecognizeRespon" + "se.SpeechEventType\"L\n\017SpeechEventType\022\034\n" + "\030SPEECH_EVENT_UNSPECIFIED\020\000\022\033\n\027END_OF_SI" - + "NGLE_UTTERANCE\020\001\"\355\001\n\032StreamingRecognitio" + + "NGLE_UTTERANCE\020\001\"\362\001\n\032StreamingRecognitio" + "nResult\022J\n\014alternatives\030\001 \003(\01324.google.c" + "loud.speech.v1.SpeechRecognitionAlternat" + "ive\022\020\n\010is_final\030\002 \001(\010\022\021\n\tstability\030\003 \001(\002" + "\0222\n\017result_end_time\030\004 \001(\0132\031.google.proto" - + "buf.Duration\022\023\n\013channel_tag\030\005 \001(\005\022\025\n\rlan" - + "guage_code\030\006 \001(\t\"z\n\027SpeechRecognitionRes" - + "ult\022J\n\014alternatives\030\001 \003(\01324.google.cloud" - + ".speech.v1.SpeechRecognitionAlternative\022" - + "\023\n\013channel_tag\030\002 \001(\005\"w\n\034SpeechRecognitio" - + "nAlternative\022\022\n\ntranscript\030\001 \001(\t\022\022\n\nconf" - + "idence\030\002 \001(\002\022/\n\005words\030\003 \003(\0132 
.google.clo" - + "ud.speech.v1.WordInfo\"t\n\010WordInfo\022-\n\nsta" - + "rt_time\030\001 \001(\0132\031.google.protobuf.Duration" - + "\022+\n\010end_time\030\002 \001(\0132\031.google.protobuf.Dur" - + "ation\022\014\n\004word\030\003 \001(\t2\251\003\n\006Speech\022\201\001\n\tRecog" - + "nize\022(.google.cloud.speech.v1.RecognizeR" - + "equest\032).google.cloud.speech.v1.Recogniz" - + "eResponse\"\037\202\323\344\223\002\031\"\024/v1/speech:recognize:" - + "\001*\022\226\001\n\024LongRunningRecognize\0223.google.clo" - + "ud.speech.v1.LongRunningRecognizeRequest" - + "\032\035.google.longrunning.Operation\"*\202\323\344\223\002$\"" - + "\037/v1/speech:longrunningrecognize:\001*\022\201\001\n\022" - + "StreamingRecognize\0221.google.cloud.speech" - + ".v1.StreamingRecognizeRequest\0322.google.c" - + "loud.speech.v1.StreamingRecognizeRespons" - + "e\"\000(\0010\001Bl\n\032com.google.cloud.speech.v1B\013S" - + "peechProtoP\001Z - * Output only. Transcript text representing the words that the user spoke. + * Transcript text representing the words that the user spoke. *
* * string transcript = 1; @@ -153,7 +153,7 @@ public java.lang.String getTranscript() { * * *
-   * Output only. Transcript text representing the words that the user spoke.
+   * Transcript text representing the words that the user spoke.
    * 
* * string transcript = 1; @@ -176,7 +176,7 @@ public com.google.protobuf.ByteString getTranscriptBytes() { * * *
-   * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+   * The confidence estimate between 0.0 and 1.0. A higher number
    * indicates an estimated greater likelihood that the recognized words are
    * correct. This field is set only for the top alternative of a non-streaming
    * result or, of a streaming result where `is_final=true`.
@@ -197,7 +197,7 @@ public float getConfidence() {
    *
    *
    * 
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -211,7 +211,7 @@ public java.util.List getWordsList() { * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -226,7 +226,7 @@ public java.util.List getWordsList() { * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -240,7 +240,7 @@ public int getWordsCount() { * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -254,7 +254,7 @@ public com.google.cloud.speech.v1.WordInfo getWords(int index) { * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -665,7 +665,7 @@ public Builder mergeFrom( * * *
-     * Output only. Transcript text representing the words that the user spoke.
+     * Transcript text representing the words that the user spoke.
      * 
* * string transcript = 1; @@ -685,7 +685,7 @@ public java.lang.String getTranscript() { * * *
-     * Output only. Transcript text representing the words that the user spoke.
+     * Transcript text representing the words that the user spoke.
      * 
* * string transcript = 1; @@ -705,7 +705,7 @@ public com.google.protobuf.ByteString getTranscriptBytes() { * * *
-     * Output only. Transcript text representing the words that the user spoke.
+     * Transcript text representing the words that the user spoke.
      * 
* * string transcript = 1; @@ -723,7 +723,7 @@ public Builder setTranscript(java.lang.String value) { * * *
-     * Output only. Transcript text representing the words that the user spoke.
+     * Transcript text representing the words that the user spoke.
      * 
* * string transcript = 1; @@ -738,7 +738,7 @@ public Builder clearTranscript() { * * *
-     * Output only. Transcript text representing the words that the user spoke.
+     * Transcript text representing the words that the user spoke.
      * 
* * string transcript = 1; @@ -759,7 +759,7 @@ public Builder setTranscriptBytes(com.google.protobuf.ByteString value) { * * *
-     * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+     * The confidence estimate between 0.0 and 1.0. A higher number
      * indicates an estimated greater likelihood that the recognized words are
      * correct. This field is set only for the top alternative of a non-streaming
      * result or, of a streaming result where `is_final=true`.
@@ -777,7 +777,7 @@ public float getConfidence() {
      *
      *
      * 
-     * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+     * The confidence estimate between 0.0 and 1.0. A higher number
      * indicates an estimated greater likelihood that the recognized words are
      * correct. This field is set only for the top alternative of a non-streaming
      * result or, of a streaming result where `is_final=true`.
@@ -798,7 +798,7 @@ public Builder setConfidence(float value) {
      *
      *
      * 
-     * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+     * The confidence estimate between 0.0 and 1.0. A higher number
      * indicates an estimated greater likelihood that the recognized words are
      * correct. This field is set only for the top alternative of a non-streaming
      * result or, of a streaming result where `is_final=true`.
@@ -836,7 +836,7 @@ private void ensureWordsIsMutable() {
      *
      *
      * 
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -854,7 +854,7 @@ public java.util.List getWordsList() { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -872,7 +872,7 @@ public int getWordsCount() { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -890,7 +890,7 @@ public com.google.cloud.speech.v1.WordInfo getWords(int index) { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -914,7 +914,7 @@ public Builder setWords(int index, com.google.cloud.speech.v1.WordInfo value) { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -936,7 +936,7 @@ public Builder setWords( * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -960,7 +960,7 @@ public Builder addWords(com.google.cloud.speech.v1.WordInfo value) { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -984,7 +984,7 @@ public Builder addWords(int index, com.google.cloud.speech.v1.WordInfo value) { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1005,7 +1005,7 @@ public Builder addWords(com.google.cloud.speech.v1.WordInfo.Builder builderForVa * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1027,7 +1027,7 @@ public Builder addWords( * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1049,7 +1049,7 @@ public Builder addAllWords( * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1070,7 +1070,7 @@ public Builder clearWords() { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1091,7 +1091,7 @@ public Builder removeWords(int index) { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1105,7 +1105,7 @@ public com.google.cloud.speech.v1.WordInfo.Builder getWordsBuilder(int index) { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1123,7 +1123,7 @@ public com.google.cloud.speech.v1.WordInfoOrBuilder getWordsOrBuilder(int index) * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1142,7 +1142,7 @@ public com.google.cloud.speech.v1.WordInfoOrBuilder getWordsOrBuilder(int index) * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1157,7 +1157,7 @@ public com.google.cloud.speech.v1.WordInfo.Builder addWordsBuilder() { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1172,7 +1172,7 @@ public com.google.cloud.speech.v1.WordInfo.Builder addWordsBuilder(int index) { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionAlternativeOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionAlternativeOrBuilder.java index 405e802ef..b535ea601 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionAlternativeOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionAlternativeOrBuilder.java @@ -27,7 +27,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. Transcript text representing the words that the user spoke.
+   * Transcript text representing the words that the user spoke.
    * 
* * string transcript = 1; @@ -37,7 +37,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. Transcript text representing the words that the user spoke.
+   * Transcript text representing the words that the user spoke.
    * 
* * string transcript = 1; @@ -48,7 +48,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+   * The confidence estimate between 0.0 and 1.0. A higher number
    * indicates an estimated greater likelihood that the recognized words are
    * correct. This field is set only for the top alternative of a non-streaming
    * result or, of a streaming result where `is_final=true`.
@@ -65,7 +65,7 @@ public interface SpeechRecognitionAlternativeOrBuilder
    *
    *
    * 
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -77,7 +77,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -89,7 +89,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -101,7 +101,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -113,7 +113,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResult.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResult.java index 437f2e674..5f4b209bf 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResult.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResult.java @@ -128,7 +128,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -144,7 +144,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -160,7 +160,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -175,7 +175,7 @@ public int getAlternativesCount() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -190,7 +190,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative getAlternatives(i
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -619,7 +619,7 @@ private void ensureAlternativesIsMutable() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -639,7 +639,7 @@ private void ensureAlternativesIsMutable() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -658,7 +658,7 @@ public int getAlternativesCount() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -677,7 +677,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative getAlternatives(i
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -703,7 +703,7 @@ public Builder setAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -727,7 +727,7 @@ public Builder setAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -752,7 +752,7 @@ public Builder addAlternatives(com.google.cloud.speech.v1.SpeechRecognitionAlter
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -778,7 +778,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -801,7 +801,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -825,7 +825,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -849,7 +849,7 @@ public Builder addAllAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -871,7 +871,7 @@ public Builder clearAlternatives() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -893,7 +893,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -909,7 +909,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder getAltern
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -929,7 +929,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder getAltern
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -950,7 +950,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder getAltern
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -967,7 +967,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder getAltern
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -985,7 +985,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder addAltern
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResultOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResultOrBuilder.java
index f242981c3..a5cf3b4c1 100644
--- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResultOrBuilder.java
+++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResultOrBuilder.java
@@ -27,7 +27,7 @@ public interface SpeechRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -40,7 +40,7 @@ public interface SpeechRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -53,7 +53,7 @@ public interface SpeechRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -66,7 +66,7 @@ public interface SpeechRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -80,7 +80,7 @@ public interface SpeechRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionConfig.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionConfig.java
index 5d7ae6edd..f8a8d1cae 100644
--- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionConfig.java
+++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionConfig.java
@@ -130,11 +130,13 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return config_ != null; @@ -143,11 +145,13 @@ public boolean hasConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { return config_ == null @@ -158,11 +162,13 @@ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder() { return getConfig(); @@ -174,7 +180,7 @@ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder( * * *
-   * *Optional* If `false` or omitted, the recognizer will perform continuous
+   * If `false` or omitted, the recognizer will perform continuous
    * recognition (continuing to wait for and process audio even if the user
    * pauses speaking) until the client closes the input stream (gRPC API) or
    * until the maximum time limit has been reached. May return multiple
@@ -198,7 +204,7 @@ public boolean getSingleUtterance() {
    *
    *
    * 
-   * *Optional* If `true`, interim results (tentative hypotheses) may be
+   * If `true`, interim results (tentative hypotheses) may be
    * returned as they become available (these interim results are indicated with
    * the `is_final=false` flag).
    * If `false` or omitted, only `is_final=true` result(s) are returned.
@@ -582,11 +588,13 @@ public Builder mergeFrom(
      *
      *
      * 
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return configBuilder_ != null || config_ != null; @@ -595,11 +603,13 @@ public boolean hasConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { if (configBuilder_ == null) { @@ -614,11 +624,13 @@ public com.google.cloud.speech.v1.RecognitionConfig getConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -637,11 +649,13 @@ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig value) { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig.Builder builderForValue) { if (configBuilder_ == null) { @@ -657,11 +671,13 @@ public Builder setConfig(com.google.cloud.speech.v1.RecognitionConfig.Builder bu * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder mergeConfig(com.google.cloud.speech.v1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -684,11 +700,13 @@ public Builder mergeConfig(com.google.cloud.speech.v1.RecognitionConfig value) { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder clearConfig() { if (configBuilder_ == null) { @@ -705,11 +723,13 @@ public Builder clearConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfig.Builder getConfigBuilder() { @@ -720,11 +740,13 @@ public com.google.cloud.speech.v1.RecognitionConfig.Builder getConfigBuilder() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder() { if (configBuilder_ != null) { @@ -739,11 +761,13 @@ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder( * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1.RecognitionConfig, @@ -767,7 +791,7 @@ public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder( * * *
-     * *Optional* If `false` or omitted, the recognizer will perform continuous
+     * If `false` or omitted, the recognizer will perform continuous
      * recognition (continuing to wait for and process audio even if the user
      * pauses speaking) until the client closes the input stream (gRPC API) or
      * until the maximum time limit has been reached. May return multiple
@@ -788,7 +812,7 @@ public boolean getSingleUtterance() {
      *
      *
      * 
-     * *Optional* If `false` or omitted, the recognizer will perform continuous
+     * If `false` or omitted, the recognizer will perform continuous
      * recognition (continuing to wait for and process audio even if the user
      * pauses speaking) until the client closes the input stream (gRPC API) or
      * until the maximum time limit has been reached. May return multiple
@@ -812,7 +836,7 @@ public Builder setSingleUtterance(boolean value) {
      *
      *
      * 
-     * *Optional* If `false` or omitted, the recognizer will perform continuous
+     * If `false` or omitted, the recognizer will perform continuous
      * recognition (continuing to wait for and process audio even if the user
      * pauses speaking) until the client closes the input stream (gRPC API) or
      * until the maximum time limit has been reached. May return multiple
@@ -838,7 +862,7 @@ public Builder clearSingleUtterance() {
      *
      *
      * 
-     * *Optional* If `true`, interim results (tentative hypotheses) may be
+     * If `true`, interim results (tentative hypotheses) may be
      * returned as they become available (these interim results are indicated with
      * the `is_final=false` flag).
      * If `false` or omitted, only `is_final=true` result(s) are returned.
@@ -853,7 +877,7 @@ public boolean getInterimResults() {
      *
      *
      * 
-     * *Optional* If `true`, interim results (tentative hypotheses) may be
+     * If `true`, interim results (tentative hypotheses) may be
      * returned as they become available (these interim results are indicated with
      * the `is_final=false` flag).
      * If `false` or omitted, only `is_final=true` result(s) are returned.
@@ -871,7 +895,7 @@ public Builder setInterimResults(boolean value) {
      *
      *
      * 
-     * *Optional* If `true`, interim results (tentative hypotheses) may be
+     * If `true`, interim results (tentative hypotheses) may be
      * returned as they become available (these interim results are indicated with
      * the `is_final=false` flag).
      * If `false` or omitted, only `is_final=true` result(s) are returned.
diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionConfigOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionConfigOrBuilder.java
index d5d44e561..5e05ebe8b 100644
--- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionConfigOrBuilder.java
+++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionConfigOrBuilder.java
@@ -27,33 +27,39 @@ public interface StreamingRecognitionConfigOrBuilder
    *
    *
    * 
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ boolean hasConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1.RecognitionConfig getConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder(); @@ -61,7 +67,7 @@ public interface StreamingRecognitionConfigOrBuilder * * *
-   * *Optional* If `false` or omitted, the recognizer will perform continuous
+   * If `false` or omitted, the recognizer will perform continuous
    * recognition (continuing to wait for and process audio even if the user
    * pauses speaking) until the client closes the input stream (gRPC API) or
    * until the maximum time limit has been reached. May return multiple
@@ -81,7 +87,7 @@ public interface StreamingRecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* If `true`, interim results (tentative hypotheses) may be
+   * If `true`, interim results (tentative hypotheses) may be
    * returned as they become available (these interim results are indicated with
    * the `is_final=false` flag).
    * If `false` or omitted, only `is_final=true` result(s) are returned.
diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResult.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResult.java
index 722b35891..5f2f80b54 100644
--- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResult.java
+++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResult.java
@@ -162,7 +162,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -178,7 +178,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -194,7 +194,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -209,7 +209,7 @@ public int getAlternativesCount() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -224,7 +224,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative getAlternatives(i
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -243,7 +243,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternativeOrBuilder getAlter
    *
    *
    * 
-   * Output only. If `false`, this `StreamingRecognitionResult` represents an
+   * If `false`, this `StreamingRecognitionResult` represents an
    * interim result that may change. If `true`, this is the final time the
    * speech service will return this particular `StreamingRecognitionResult`,
    * the recognizer will not return any further hypotheses for this portion of
@@ -262,7 +262,7 @@ public boolean getIsFinal() {
    *
    *
    * 
-   * Output only. An estimate of the likelihood that the recognizer will not
+   * An estimate of the likelihood that the recognizer will not
    * change its guess about this interim result. Values range from 0.0
    * (completely unstable) to 1.0 (completely stable).
    * This field is only provided for interim results (`is_final=false`).
@@ -281,7 +281,7 @@ public float getStability() {
    *
    *
    * 
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -294,7 +294,7 @@ public boolean hasResultEndTime() { * * *
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -309,7 +309,7 @@ public com.google.protobuf.Duration getResultEndTime() { * * *
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -342,13 +342,12 @@ public int getChannelTag() { * * *
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
+   * the language in this result. This language code was detected to have the
+   * most likelihood of being spoken in the audio.
    * 
* - * string language_code = 6; + * string language_code = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; @@ -365,13 +364,12 @@ public java.lang.String getLanguageCode() { * * *
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
+   * the language in this result. This language code was detected to have the
+   * most likelihood of being spoken in the audio.
    * 
* - * string language_code = 6; + * string language_code = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; @@ -861,7 +859,7 @@ private void ensureAlternativesIsMutable() { * * *
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -881,7 +879,7 @@ private void ensureAlternativesIsMutable() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -900,7 +898,7 @@ public int getAlternativesCount() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -919,7 +917,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative getAlternatives(i
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -945,7 +943,7 @@ public Builder setAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -969,7 +967,7 @@ public Builder setAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -994,7 +992,7 @@ public Builder addAlternatives(com.google.cloud.speech.v1.SpeechRecognitionAlter
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1020,7 +1018,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1043,7 +1041,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1067,7 +1065,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1091,7 +1089,7 @@ public Builder addAllAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1113,7 +1111,7 @@ public Builder clearAlternatives() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1135,7 +1133,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1151,7 +1149,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder getAltern
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1171,7 +1169,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder getAltern
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1192,7 +1190,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder getAltern
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1209,7 +1207,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder getAltern
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1227,7 +1225,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder addAltern
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1262,7 +1260,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder addAltern
      *
      *
      * 
-     * Output only. If `false`, this `StreamingRecognitionResult` represents an
+     * If `false`, this `StreamingRecognitionResult` represents an
      * interim result that may change. If `true`, this is the final time the
      * speech service will return this particular `StreamingRecognitionResult`,
      * the recognizer will not return any further hypotheses for this portion of
@@ -1278,7 +1276,7 @@ public boolean getIsFinal() {
      *
      *
      * 
-     * Output only. If `false`, this `StreamingRecognitionResult` represents an
+     * If `false`, this `StreamingRecognitionResult` represents an
      * interim result that may change. If `true`, this is the final time the
      * speech service will return this particular `StreamingRecognitionResult`,
      * the recognizer will not return any further hypotheses for this portion of
@@ -1297,7 +1295,7 @@ public Builder setIsFinal(boolean value) {
      *
      *
      * 
-     * Output only. If `false`, this `StreamingRecognitionResult` represents an
+     * If `false`, this `StreamingRecognitionResult` represents an
      * interim result that may change. If `true`, this is the final time the
      * speech service will return this particular `StreamingRecognitionResult`,
      * the recognizer will not return any further hypotheses for this portion of
@@ -1318,7 +1316,7 @@ public Builder clearIsFinal() {
      *
      *
      * 
-     * Output only. An estimate of the likelihood that the recognizer will not
+     * An estimate of the likelihood that the recognizer will not
      * change its guess about this interim result. Values range from 0.0
      * (completely unstable) to 1.0 (completely stable).
      * This field is only provided for interim results (`is_final=false`).
@@ -1334,7 +1332,7 @@ public float getStability() {
      *
      *
      * 
-     * Output only. An estimate of the likelihood that the recognizer will not
+     * An estimate of the likelihood that the recognizer will not
      * change its guess about this interim result. Values range from 0.0
      * (completely unstable) to 1.0 (completely stable).
      * This field is only provided for interim results (`is_final=false`).
@@ -1353,7 +1351,7 @@ public Builder setStability(float value) {
      *
      *
      * 
-     * Output only. An estimate of the likelihood that the recognizer will not
+     * An estimate of the likelihood that the recognizer will not
      * change its guess about this interim result. Values range from 0.0
      * (completely unstable) to 1.0 (completely stable).
      * This field is only provided for interim results (`is_final=false`).
@@ -1379,7 +1377,7 @@ public Builder clearStability() {
      *
      *
      * 
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1392,7 +1390,7 @@ public boolean hasResultEndTime() { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1411,7 +1409,7 @@ public com.google.protobuf.Duration getResultEndTime() { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1434,7 +1432,7 @@ public Builder setResultEndTime(com.google.protobuf.Duration value) { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1454,7 +1452,7 @@ public Builder setResultEndTime(com.google.protobuf.Duration.Builder builderForV * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1481,7 +1479,7 @@ public Builder mergeResultEndTime(com.google.protobuf.Duration value) { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1502,7 +1500,7 @@ public Builder clearResultEndTime() { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1517,7 +1515,7 @@ public com.google.protobuf.Duration.Builder getResultEndTimeBuilder() { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1536,7 +1534,7 @@ public com.google.protobuf.DurationOrBuilder getResultEndTimeOrBuilder() { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1614,13 +1612,12 @@ public Builder clearChannelTag() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
+     * the language in this result. This language code was detected to have the
+     * most likelihood of being spoken in the audio.
      * 
* - * string language_code = 6; + * string language_code = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; @@ -1637,13 +1634,12 @@ public java.lang.String getLanguageCode() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
+     * the language in this result. This language code was detected to have the
+     * most likelihood of being spoken in the audio.
      * 
* - * string language_code = 6; + * string language_code = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; @@ -1660,13 +1656,12 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
+     * the language in this result. This language code was detected to have the
+     * most likelihood of being spoken in the audio.
      * 
* - * string language_code = 6; + * string language_code = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public Builder setLanguageCode(java.lang.String value) { if (value == null) { @@ -1681,13 +1676,12 @@ public Builder setLanguageCode(java.lang.String value) { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
+     * the language in this result. This language code was detected to have the
+     * most likelihood of being spoken in the audio.
      * 
* - * string language_code = 6; + * string language_code = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public Builder clearLanguageCode() { @@ -1699,13 +1693,12 @@ public Builder clearLanguageCode() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
+     * the language in this result. This language code was detected to have the
+     * most likelihood of being spoken in the audio.
      * 
* - * string language_code = 6; + * string language_code = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) { if (value == null) { diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResultOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResultOrBuilder.java index 917089817..b7688553d 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResultOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResultOrBuilder.java @@ -27,7 +27,7 @@ public interface StreamingRecognitionResultOrBuilder * * *
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -40,7 +40,7 @@ public interface StreamingRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -53,7 +53,7 @@ public interface StreamingRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -66,7 +66,7 @@ public interface StreamingRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -80,7 +80,7 @@ public interface StreamingRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -95,7 +95,7 @@ com.google.cloud.speech.v1.SpeechRecognitionAlternativeOrBuilder getAlternatives
    *
    *
    * 
-   * Output only. If `false`, this `StreamingRecognitionResult` represents an
+   * If `false`, this `StreamingRecognitionResult` represents an
    * interim result that may change. If `true`, this is the final time the
    * speech service will return this particular `StreamingRecognitionResult`,
    * the recognizer will not return any further hypotheses for this portion of
@@ -110,7 +110,7 @@ com.google.cloud.speech.v1.SpeechRecognitionAlternativeOrBuilder getAlternatives
    *
    *
    * 
-   * Output only. An estimate of the likelihood that the recognizer will not
+   * An estimate of the likelihood that the recognizer will not
    * change its guess about this interim result. Values range from 0.0
    * (completely unstable) to 1.0 (completely stable).
    * This field is only provided for interim results (`is_final=false`).
@@ -125,7 +125,7 @@ com.google.cloud.speech.v1.SpeechRecognitionAlternativeOrBuilder getAlternatives
    *
    *
    * 
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -136,7 +136,7 @@ com.google.cloud.speech.v1.SpeechRecognitionAlternativeOrBuilder getAlternatives * * *
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -147,7 +147,7 @@ com.google.cloud.speech.v1.SpeechRecognitionAlternativeOrBuilder getAlternatives * * *
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -172,26 +172,24 @@ com.google.cloud.speech.v1.SpeechRecognitionAlternativeOrBuilder getAlternatives * * *
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
+   * the language in this result. This language code was detected to have the
+   * most likelihood of being spoken in the audio.
    * 
* - * string language_code = 6; + * string language_code = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ java.lang.String getLanguageCode(); /** * * *
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
+   * the language in this result. This language code was detected to have the
+   * most likelihood of being spoken in the audio.
    * 
* - * string language_code = 6; + * string language_code = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; */ com.google.protobuf.ByteString getLanguageCodeBytes(); } diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeRequest.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeRequest.java index 8143e608b..0a3bdb121 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeRequest.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeRequest.java @@ -24,9 +24,9 @@ *
  * The top-level message sent by the client for the `StreamingRecognize` method.
  * Multiple `StreamingRecognizeRequest` messages are sent. The first message
- * must contain a `streaming_config` message and must not contain `audio` data.
- * All subsequent messages must contain `audio` data and must not contain a
- * `streaming_config` message.
+ * must contain a `streaming_config` message and must not contain
+ * `audio_content`. All subsequent messages must contain `audio_content` and
+ * must not contain a `streaming_config` message.
  * 
* * Protobuf type {@code google.cloud.speech.v1.StreamingRecognizeRequest} @@ -228,9 +228,9 @@ public com.google.cloud.speech.v1.StreamingRecognitionConfig getStreamingConfig( * `StreamingRecognizeRequest` message must not contain `audio_content` data * and all subsequent `StreamingRecognizeRequest` messages must contain * `audio_content` data. The audio bytes must be encoded as specified in - * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a * pure binary representation (not base64). See - * [content limits](/speech-to-text/quotas#content). + * [content limits](https://cloud.google.com/speech-to-text/quotas#content). *
* * bytes audio_content = 2; @@ -438,9 +438,9 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build *
    * The top-level message sent by the client for the `StreamingRecognize` method.
    * Multiple `StreamingRecognizeRequest` messages are sent. The first message
-   * must contain a `streaming_config` message and must not contain `audio` data.
-   * All subsequent messages must contain `audio` data and must not contain a
-   * `streaming_config` message.
+   * must contain a `streaming_config` message and must not contain
+   * `audio_content`. All subsequent messages must contain `audio_content` and
+   * must not contain a `streaming_config` message.
    * 
* * Protobuf type {@code google.cloud.speech.v1.StreamingRecognizeRequest} @@ -865,9 +865,9 @@ public Builder clearStreamingConfig() { * `StreamingRecognizeRequest` message must not contain `audio_content` data * and all subsequent `StreamingRecognizeRequest` messages must contain * `audio_content` data. The audio bytes must be encoded as specified in - * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a * pure binary representation (not base64). See - * [content limits](/speech-to-text/quotas#content). + * [content limits](https://cloud.google.com/speech-to-text/quotas#content). *
* * bytes audio_content = 2; @@ -887,9 +887,9 @@ public com.google.protobuf.ByteString getAudioContent() { * `StreamingRecognizeRequest` message must not contain `audio_content` data * and all subsequent `StreamingRecognizeRequest` messages must contain * `audio_content` data. The audio bytes must be encoded as specified in - * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a * pure binary representation (not base64). See - * [content limits](/speech-to-text/quotas#content). + * [content limits](https://cloud.google.com/speech-to-text/quotas#content). *
* * bytes audio_content = 2; @@ -912,9 +912,9 @@ public Builder setAudioContent(com.google.protobuf.ByteString value) { * `StreamingRecognizeRequest` message must not contain `audio_content` data * and all subsequent `StreamingRecognizeRequest` messages must contain * `audio_content` data. The audio bytes must be encoded as specified in - * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a * pure binary representation (not base64). See - * [content limits](/speech-to-text/quotas#content). + * [content limits](https://cloud.google.com/speech-to-text/quotas#content). *
* * bytes audio_content = 2; diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeRequestOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeRequestOrBuilder.java index e39bd37b0..206598e71 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeRequestOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeRequestOrBuilder.java @@ -69,9 +69,9 @@ public interface StreamingRecognizeRequestOrBuilder * `StreamingRecognizeRequest` message must not contain `audio_content` data * and all subsequent `StreamingRecognizeRequest` messages must contain * `audio_content` data. The audio bytes must be encoded as specified in - * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a * pure binary representation (not base64). See - * [content limits](/speech-to-text/quotas#content). + * [content limits](https://cloud.google.com/speech-to-text/quotas#content). *
* * bytes audio_content = 2; diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeResponse.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeResponse.java index 62a94fcbc..f04bec500 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeResponse.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeResponse.java @@ -319,8 +319,8 @@ private SpeechEventType(int value) { * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -332,8 +332,8 @@ public boolean hasError() { * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -345,8 +345,8 @@ public com.google.rpc.Status getError() { * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -361,7 +361,7 @@ public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { * * *
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -376,7 +376,7 @@ public java.util.List get
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -392,7 +392,7 @@ public java.util.List get
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -407,7 +407,7 @@ public int getResultsCount() {
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -422,7 +422,7 @@ public com.google.cloud.speech.v1.StreamingRecognitionResult getResults(int inde
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -441,7 +441,7 @@ public com.google.cloud.speech.v1.StreamingRecognitionResultOrBuilder getResults
    *
    *
    * 
-   * Output only. Indicates the type of speech event.
+   * Indicates the type of speech event.
    * 
* * .google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType speech_event_type = 4; @@ -454,7 +454,7 @@ public int getSpeechEventTypeValue() { * * *
-   * Output only. Indicates the type of speech event.
+   * Indicates the type of speech event.
    * 
* * .google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType speech_event_type = 4; @@ -926,8 +926,8 @@ public Builder mergeFrom( * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -939,8 +939,8 @@ public boolean hasError() { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -956,8 +956,8 @@ public com.google.rpc.Status getError() { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -979,8 +979,8 @@ public Builder setError(com.google.rpc.Status value) { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -999,8 +999,8 @@ public Builder setError(com.google.rpc.Status.Builder builderForValue) { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1023,8 +1023,8 @@ public Builder mergeError(com.google.rpc.Status value) { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1044,8 +1044,8 @@ public Builder clearError() { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1059,8 +1059,8 @@ public com.google.rpc.Status.Builder getErrorBuilder() { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1076,8 +1076,8 @@ public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1118,7 +1118,7 @@ private void ensureResultsIsMutable() { * * *
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1137,7 +1137,7 @@ public java.util.List get
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1156,7 +1156,7 @@ public int getResultsCount() {
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1175,7 +1175,7 @@ public com.google.cloud.speech.v1.StreamingRecognitionResult getResults(int inde
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1201,7 +1201,7 @@ public Builder setResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1224,7 +1224,7 @@ public Builder setResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1249,7 +1249,7 @@ public Builder addResults(com.google.cloud.speech.v1.StreamingRecognitionResult
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1275,7 +1275,7 @@ public Builder addResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1298,7 +1298,7 @@ public Builder addResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1321,7 +1321,7 @@ public Builder addResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1345,7 +1345,7 @@ public Builder addAllResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1367,7 +1367,7 @@ public Builder clearResults() {
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1389,7 +1389,7 @@ public Builder removeResults(int index) {
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1405,7 +1405,7 @@ public com.google.cloud.speech.v1.StreamingRecognitionResult.Builder getResultsB
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1425,7 +1425,7 @@ public com.google.cloud.speech.v1.StreamingRecognitionResultOrBuilder getResults
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1445,7 +1445,7 @@ public com.google.cloud.speech.v1.StreamingRecognitionResultOrBuilder getResults
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1461,7 +1461,7 @@ public com.google.cloud.speech.v1.StreamingRecognitionResult.Builder addResultsB
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1479,7 +1479,7 @@ public com.google.cloud.speech.v1.StreamingRecognitionResult.Builder addResultsB
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1514,7 +1514,7 @@ public com.google.cloud.speech.v1.StreamingRecognitionResult.Builder addResultsB
      *
      *
      * 
-     * Output only. Indicates the type of speech event.
+     * Indicates the type of speech event.
      * 
* * @@ -1528,7 +1528,7 @@ public int getSpeechEventTypeValue() { * * *
-     * Output only. Indicates the type of speech event.
+     * Indicates the type of speech event.
      * 
* * @@ -1544,7 +1544,7 @@ public Builder setSpeechEventTypeValue(int value) { * * *
-     * Output only. Indicates the type of speech event.
+     * Indicates the type of speech event.
      * 
* * @@ -1565,7 +1565,7 @@ public Builder setSpeechEventTypeValue(int value) { * * *
-     * Output only. Indicates the type of speech event.
+     * Indicates the type of speech event.
      * 
* * @@ -1586,7 +1586,7 @@ public Builder setSpeechEventType( * * *
-     * Output only. Indicates the type of speech event.
+     * Indicates the type of speech event.
      * 
* * diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeResponseOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeResponseOrBuilder.java index ee5e8870b..18cc04445 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeResponseOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognizeResponseOrBuilder.java @@ -27,8 +27,8 @@ public interface StreamingRecognizeResponseOrBuilder * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -38,8 +38,8 @@ public interface StreamingRecognizeResponseOrBuilder * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -49,8 +49,8 @@ public interface StreamingRecognizeResponseOrBuilder * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -61,7 +61,7 @@ public interface StreamingRecognizeResponseOrBuilder * * *
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -74,7 +74,7 @@ public interface StreamingRecognizeResponseOrBuilder
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -87,7 +87,7 @@ public interface StreamingRecognizeResponseOrBuilder
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -100,7 +100,7 @@ public interface StreamingRecognizeResponseOrBuilder
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -114,7 +114,7 @@ public interface StreamingRecognizeResponseOrBuilder
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -128,7 +128,7 @@ public interface StreamingRecognizeResponseOrBuilder
    *
    *
    * 
-   * Output only. Indicates the type of speech event.
+   * Indicates the type of speech event.
    * 
* * .google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType speech_event_type = 4; @@ -139,7 +139,7 @@ public interface StreamingRecognizeResponseOrBuilder * * *
-   * Output only. Indicates the type of speech event.
+   * Indicates the type of speech event.
    * 
* * .google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType speech_event_type = 4; diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/WordInfo.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/WordInfo.java index 4290f5647..1dfbb4424 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/WordInfo.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/WordInfo.java @@ -142,7 +142,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -159,7 +159,7 @@ public boolean hasStartTime() {
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -176,7 +176,7 @@ public com.google.protobuf.Duration getStartTime() {
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -196,7 +196,7 @@ public com.google.protobuf.DurationOrBuilder getStartTimeOrBuilder() {
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -213,7 +213,7 @@ public boolean hasEndTime() {
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -230,7 +230,7 @@ public com.google.protobuf.Duration getEndTime() {
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -250,7 +250,7 @@ public com.google.protobuf.DurationOrBuilder getEndTimeOrBuilder() {
    *
    *
    * 
-   * Output only. The word corresponding to this set of information.
+   * The word corresponding to this set of information.
    * 
* * string word = 3; @@ -270,7 +270,7 @@ public java.lang.String getWord() { * * *
-   * Output only. The word corresponding to this set of information.
+   * The word corresponding to this set of information.
    * 
* * string word = 3; @@ -666,7 +666,7 @@ public Builder mergeFrom( * * *
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -683,7 +683,7 @@ public boolean hasStartTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -704,7 +704,7 @@ public com.google.protobuf.Duration getStartTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -731,7 +731,7 @@ public Builder setStartTime(com.google.protobuf.Duration value) {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -755,7 +755,7 @@ public Builder setStartTime(com.google.protobuf.Duration.Builder builderForValue
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -784,7 +784,7 @@ public Builder mergeStartTime(com.google.protobuf.Duration value) {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -809,7 +809,7 @@ public Builder clearStartTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -828,7 +828,7 @@ public com.google.protobuf.Duration.Builder getStartTimeBuilder() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -849,7 +849,7 @@ public com.google.protobuf.DurationOrBuilder getStartTimeOrBuilder() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -886,7 +886,7 @@ public com.google.protobuf.DurationOrBuilder getStartTimeOrBuilder() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -903,7 +903,7 @@ public boolean hasEndTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -924,7 +924,7 @@ public com.google.protobuf.Duration getEndTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -951,7 +951,7 @@ public Builder setEndTime(com.google.protobuf.Duration value) {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -975,7 +975,7 @@ public Builder setEndTime(com.google.protobuf.Duration.Builder builderForValue)
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1004,7 +1004,7 @@ public Builder mergeEndTime(com.google.protobuf.Duration value) {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1029,7 +1029,7 @@ public Builder clearEndTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1048,7 +1048,7 @@ public com.google.protobuf.Duration.Builder getEndTimeBuilder() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1069,7 +1069,7 @@ public com.google.protobuf.DurationOrBuilder getEndTimeOrBuilder() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1101,7 +1101,7 @@ public com.google.protobuf.DurationOrBuilder getEndTimeOrBuilder() {
      *
      *
      * 
-     * Output only. The word corresponding to this set of information.
+     * The word corresponding to this set of information.
      * 
* * string word = 3; @@ -1121,7 +1121,7 @@ public java.lang.String getWord() { * * *
-     * Output only. The word corresponding to this set of information.
+     * The word corresponding to this set of information.
      * 
* * string word = 3; @@ -1141,7 +1141,7 @@ public com.google.protobuf.ByteString getWordBytes() { * * *
-     * Output only. The word corresponding to this set of information.
+     * The word corresponding to this set of information.
      * 
* * string word = 3; @@ -1159,7 +1159,7 @@ public Builder setWord(java.lang.String value) { * * *
-     * Output only. The word corresponding to this set of information.
+     * The word corresponding to this set of information.
      * 
* * string word = 3; @@ -1174,7 +1174,7 @@ public Builder clearWord() { * * *
-     * Output only. The word corresponding to this set of information.
+     * The word corresponding to this set of information.
      * 
* * string word = 3; diff --git a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/WordInfoOrBuilder.java b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/WordInfoOrBuilder.java index a6f7f62ab..c2c671c52 100644 --- a/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/WordInfoOrBuilder.java +++ b/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/WordInfoOrBuilder.java @@ -27,7 +27,7 @@ public interface WordInfoOrBuilder * * *
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -42,7 +42,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -57,7 +57,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -73,7 +73,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -88,7 +88,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -103,7 +103,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -119,7 +119,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. The word corresponding to this set of information.
+   * The word corresponding to this set of information.
    * 
* * string word = 3; @@ -129,7 +129,7 @@ public interface WordInfoOrBuilder * * *
-   * Output only. The word corresponding to this set of information.
+   * The word corresponding to this set of information.
    * 
* * string word = 3; diff --git a/proto-google-cloud-speech-v1/src/main/proto/google/cloud/speech/v1/cloud_speech.proto b/proto-google-cloud-speech-v1/src/main/proto/google/cloud/speech/v1/cloud_speech.proto index 30e540cbb..0887915a1 100644 --- a/proto-google-cloud-speech-v1/src/main/proto/google/cloud/speech/v1/cloud_speech.proto +++ b/proto-google-cloud-speech-v1/src/main/proto/google/cloud/speech/v1/cloud_speech.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,7 +18,10 @@ syntax = "proto3"; package google.cloud.speech.v1; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; import "google/longrunning/operations.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/rpc/status.proto"; @@ -28,9 +31,13 @@ option go_package = "google.golang.org/genproto/googleapis/cloud/speech/v1;speec option java_multiple_files = true; option java_outer_classname = "SpeechProto"; option java_package = "com.google.cloud.speech.v1"; +option objc_class_prefix = "GCS"; // Service that implements Google Cloud Speech API. service Speech { + option (google.api.default_host) = "speech.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + // Performs synchronous speech recognition: receive results after all audio // has been sent and processed. rpc Recognize(RecognizeRequest) returns (RecognizeResponse) { @@ -38,52 +45,59 @@ service Speech { post: "/v1/speech:recognize" body: "*" }; + option (google.api.method_signature) = "config,audio"; } // Performs asynchronous speech recognition: receive results via the // google.longrunning.Operations interface. 
Returns either an // `Operation.error` or an `Operation.response` which contains // a `LongRunningRecognizeResponse` message. - rpc LongRunningRecognize(LongRunningRecognizeRequest) - returns (google.longrunning.Operation) { + // For more information on asynchronous speech recognition, see the + // [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). + rpc LongRunningRecognize(LongRunningRecognizeRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/speech:longrunningrecognize" body: "*" }; + option (google.api.method_signature) = "config,audio"; + option (google.longrunning.operation_info) = { + response_type: "LongRunningRecognizeResponse" + metadata_type: "LongRunningRecognizeMetadata" + }; } // Performs bidirectional streaming speech recognition: receive results while // sending audio. This method is only available via the gRPC API (not REST). - rpc StreamingRecognize(stream StreamingRecognizeRequest) - returns (stream StreamingRecognizeResponse) {} + rpc StreamingRecognize(stream StreamingRecognizeRequest) returns (stream StreamingRecognizeResponse) { + } } // The top-level message sent by the client for the `Recognize` method. message RecognizeRequest { - // *Required* Provides information to the recognizer that specifies how to + // Required. Provides information to the recognizer that specifies how to // process the request. - RecognitionConfig config = 1; + RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED]; - // *Required* The audio data to be recognized. - RecognitionAudio audio = 2; + // Required. The audio data to be recognized. + RecognitionAudio audio = 2 [(google.api.field_behavior) = REQUIRED]; } // The top-level message sent by the client for the `LongRunningRecognize` // method. message LongRunningRecognizeRequest { - // *Required* Provides information to the recognizer that specifies how to + // Required. 
Provides information to the recognizer that specifies how to // process the request. - RecognitionConfig config = 1; + RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED]; - // *Required* The audio data to be recognized. - RecognitionAudio audio = 2; + // Required. The audio data to be recognized. + RecognitionAudio audio = 2 [(google.api.field_behavior) = REQUIRED]; } // The top-level message sent by the client for the `StreamingRecognize` method. // Multiple `StreamingRecognizeRequest` messages are sent. The first message -// must contain a `streaming_config` message and must not contain `audio` data. -// All subsequent messages must contain `audio` data and must not contain a -// `streaming_config` message. +// must contain a `streaming_config` message and must not contain +// `audio_content`. All subsequent messages must contain `audio_content` and +// must not contain a `streaming_config` message. message StreamingRecognizeRequest { // The streaming request, which is either a streaming config or audio content. oneof streaming_request { @@ -97,9 +111,9 @@ message StreamingRecognizeRequest { // `StreamingRecognizeRequest` message must not contain `audio_content` data // and all subsequent `StreamingRecognizeRequest` messages must contain // `audio_content` data. The audio bytes must be encoded as specified in - // `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + // `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a // pure binary representation (not base64). See - // [content limits](/speech-to-text/quotas#content). + // [content limits](https://cloud.google.com/speech-to-text/quotas#content). bytes audio_content = 2; } } @@ -107,11 +121,11 @@ message StreamingRecognizeRequest { // Provides information to the recognizer that specifies how to process the // request. message StreamingRecognitionConfig { - // *Required* Provides information to the recognizer that specifies how to + // Required. 
Provides information to the recognizer that specifies how to // process the request. - RecognitionConfig config = 1; + RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED]; - // *Optional* If `false` or omitted, the recognizer will perform continuous + // If `false` or omitted, the recognizer will perform continuous // recognition (continuing to wait for and process audio even if the user // pauses speaking) until the client closes the input stream (gRPC API) or // until the maximum time limit has been reached. May return multiple @@ -124,7 +138,7 @@ message StreamingRecognitionConfig { // `true`. bool single_utterance = 2; - // *Optional* If `true`, interim results (tentative hypotheses) may be + // If `true`, interim results (tentative hypotheses) may be // returned as they become available (these interim results are indicated with // the `is_final=false` flag). // If `false` or omitted, only `is_final=true` result(s) are returned. @@ -136,13 +150,15 @@ message StreamingRecognitionConfig { message RecognitionConfig { // The encoding of the audio data sent in the request. // - // All encodings support only 1 channel (mono) audio. + // All encodings support only 1 channel (mono) audio, unless the + // `audio_channel_count` and `enable_separate_recognition_per_channel` fields + // are set. // // For best results, the audio source should be captured and transmitted using // a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech // recognition can be reduced if lossy codecs are used to capture or transmit // audio, particularly if background noise is present. Lossy codecs include - // `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`. + // `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`. // // The `FLAC` and `WAV` audio file formats include a header that describes the // included audio content. 
You can request recognition for `WAV` files that @@ -153,8 +169,7 @@ message RecognitionConfig { // an `AudioEncoding` when you send `FLAC` or `WAV` audio, the // encoding configuration must match the encoding described in the audio // header; otherwise the request returns an - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error - // code. + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code. enum AudioEncoding { // Not specified. ENCODING_UNSPECIFIED = 0; @@ -202,8 +217,7 @@ message RecognitionConfig { // Encoding of audio data sent in all `RecognitionAudio` messages. // This field is optional for `FLAC` and `WAV` audio files and required - // for all other audio formats. For details, see - // [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. + // for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. AudioEncoding encoding = 1; // Sample rate in Hertz of the audio data sent in all @@ -211,12 +225,11 @@ message RecognitionConfig { // 16000 is optimal. For best results, set the sampling rate of the audio // source to 16000 Hz. If that's not possible, use the native sample rate of // the audio source (instead of re-sampling). - // This field is optional for `FLAC` and `WAV` audio files and required - // for all other audio formats. For details, see - // [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. + // This field is optional for FLAC and WAV audio files, but is + // required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. int32 sample_rate_hertz = 2; - // *Optional* The number of channels in the input audio data. + // The number of channels in the input audio data. // ONLY set this for MULTI-CHANNEL recognition. // Valid values for LINEAR16 and FLAC are `1`-`8`. // Valid values for OGG_OPUS are '1'-'254'.
@@ -235,14 +248,15 @@ message RecognitionConfig { // `audio_channel_count` multiplied by the length of the audio. bool enable_separate_recognition_per_channel = 12; - // *Required* The language of the supplied audio as a + // Required. The language of the supplied audio as a // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. // Example: "en-US". - // See [Language Support](/speech-to-text/docs/languages) - // for a list of the currently supported language codes. - string language_code = 3; + // See [Language + // Support](https://cloud.google.com/speech-to-text/docs/languages) for a list + // of the currently supported language codes. + string language_code = 3 [(google.api.field_behavior) = REQUIRED]; - // *Optional* Maximum number of recognition hypotheses to be returned. + // Maximum number of recognition hypotheses to be returned. // Specifically, the maximum number of `SpeechRecognitionAlternative` messages // within each `SpeechRecognitionResult`. // The server may return fewer than `max_alternatives`. @@ -250,24 +264,26 @@ message RecognitionConfig { // one. If omitted, will return a maximum of one. int32 max_alternatives = 4; - // *Optional* If set to `true`, the server will attempt to filter out + // If set to `true`, the server will attempt to filter out // profanities, replacing all but the initial character in each filtered word // with asterisks, e.g. "f***". If set to `false` or omitted, profanities // won't be filtered out. bool profanity_filter = 5; - // *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext]. + // Array of [SpeechContext][google.cloud.speech.v1.SpeechContext]. // A means to provide context to assist the speech recognition. For more - // information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints). + // information, see + // [speech + // adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). 
repeated SpeechContext speech_contexts = 6; - // *Optional* If `true`, the top result includes a list of words and + // If `true`, the top result includes a list of words and // the start and end time offsets (timestamps) for those words. If // `false`, no word-level time offset information is returned. The default is // `false`. bool enable_word_time_offsets = 8; - // *Optional* If 'true', adds punctuation to recognition result hypotheses. + // If 'true', adds punctuation to recognition result hypotheses. // This feature is only available in select languages. Setting this for // requests in other languages has no effect at all. // The default 'false' value does not add punctuation to result hypotheses. @@ -276,7 +292,7 @@ message RecognitionConfig { // premium feature. bool enable_automatic_punctuation = 11; - // *Optional* Config to enable speaker diarization and set additional + // Config to enable speaker diarization and set additional // parameters to make diarization better suited for your application. // Note: When this is enabled, we send all the words from the beginning of the // audio for the top alternative in every consecutive STREAMING responses. @@ -286,10 +302,10 @@ message RecognitionConfig { // in the top alternative of the FINAL SpeechRecognitionResult. SpeakerDiarizationConfig diarization_config = 19; - // *Optional* Metadata regarding this request. + // Metadata regarding this request. RecognitionMetadata metadata = 9; - // *Optional* Which model to select for the given request. Select the model + // Which model to select for the given request. Select the model // best suited to your domain to get best results. If a model is not // explicitly specified, then we auto-select a model based on the parameters // in the RecognitionConfig. @@ -323,7 +339,7 @@ message RecognitionConfig { // string model = 13; - // *Optional* Set to true to use an enhanced model for speech recognition. + // Set to true to use an enhanced model for speech recognition. 
// If `use_enhanced` is set to true and the `model` field is not set, then // an appropriate enhanced model is chosen if an enhanced model exists for // the audio. @@ -334,34 +350,30 @@ message RecognitionConfig { bool use_enhanced = 14; } -// *Optional* Config to enable speaker diarization. +// Config to enable speaker diarization. message SpeakerDiarizationConfig { - // *Optional* If 'true', enables speaker detection for each recognized word in + // If 'true', enables speaker detection for each recognized word in // the top alternative of the recognition result using a speaker_tag provided // in the WordInfo. bool enable_speaker_diarization = 1; - // Note: Set min_speaker_count = max_speaker_count to fix the number of - // speakers to be detected in the audio. - - // *Optional* // Minimum number of speakers in the conversation. This range gives you more // flexibility by allowing the system to automatically determine the correct // number of speakers. If not set, the default value is 2. int32 min_speaker_count = 2; - // *Optional* // Maximum number of speakers in the conversation. This range gives you more // flexibility by allowing the system to automatically determine the correct // number of speakers. If not set, the default value is 6. int32 max_speaker_count = 3; - // Output only. A distinct integer value is assigned for every speaker within + // A distinct integer value is assigned for every speaker within // the audio. This field specifies which one of those speakers was detected to // have spoken this word. Value ranges from '1' to diarization_speaker_count. // speaker_tag is set if enable_speaker_diarization = 'true' and only in the // top alternative. - int32 speaker_tag = 5; + int32 speaker_tag = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // Description of audio data to be recognized. @@ -404,15 +416,6 @@ message RecognitionMetadata { DICTATION = 8; } - // The use case most closely describing the audio content to be recognized. 
- InteractionType interaction_type = 1; - - // The industry vertical to which this speech recognition request most - // closely applies. This is most indicative of the topics contained - // in the audio. Use the 6-digit NAICS code to identify the industry - // vertical - see https://www.naics.com/search/. - uint32 industry_naics_code_of_audio = 3; - // Enumerates the types of capture settings describing an audio file. enum MicrophoneDistance { // Audio type is not known. @@ -430,9 +433,6 @@ message RecognitionMetadata { FARFIELD = 3; } - // The audio type that most closely describes the audio being recognized. - MicrophoneDistance microphone_distance = 4; - // The original media the speech was recorded on. enum OriginalMediaType { // Unknown original media type. @@ -445,9 +445,6 @@ message RecognitionMetadata { VIDEO = 2; } - // The original media the speech was recorded on. - OriginalMediaType original_media_type = 5; - // The type of device the speech was recorded with. enum RecordingDeviceType { // The recording device is unknown. @@ -472,6 +469,21 @@ message RecognitionMetadata { OTHER_INDOOR_DEVICE = 6; } + // The use case most closely describing the audio content to be recognized. + InteractionType interaction_type = 1; + + // The industry vertical to which this speech recognition request most + // closely applies. This is most indicative of the topics contained + // in the audio. Use the 6-digit NAICS code to identify the industry + // vertical - see https://www.naics.com/search/. + uint32 industry_naics_code_of_audio = 3; + + // The audio type that most closely describes the audio being recognized. + MicrophoneDistance microphone_distance = 4; + + // The original media the speech was recorded on. + OriginalMediaType original_media_type = 5; + // The type of device the speech was recorded with. 
RecordingDeviceType recording_device_type = 6; @@ -494,25 +506,31 @@ message RecognitionMetadata { // Provides "hints" to the speech recognizer to favor specific words and phrases // in the results. message SpeechContext { - // *Optional* A list of strings containing words and phrases "hints" so that + // A list of strings containing words and phrases "hints" so that // the speech recognition is more likely to recognize them. This can be used // to improve the accuracy for specific words and phrases, for example, if // specific commands are typically spoken by the user. This can also be used // to add additional words to the vocabulary of the recognizer. See - // [usage limits](/speech-to-text/quotas#content). + // [usage limits](https://cloud.google.com/speech-to-text/quotas#content). + // + // List items can also be set to classes for groups of words that represent + // common concepts that occur in natural language. For example, rather than + // providing phrase hints for every month of the year, using the $MONTH class + // improves the likelihood of correctly transcribing audio that includes + // months. repeated string phrases = 1; } // Contains audio data in the encoding specified in the `RecognitionConfig`. // Either `content` or `uri` must be supplied. Supplying both or neither -// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. -// See [content limits](/speech-to-text/quotas#content). +// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See +// [content limits](https://cloud.google.com/speech-to-text/quotas#content). message RecognitionAudio { // The audio source, which is either inline content or a Google Cloud // Storage uri. oneof audio_source { // The audio data bytes encoded as specified in - // `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + // `RecognitionConfig`. 
Note: as with all bytes fields, proto buffers use a // pure binary representation, whereas JSON representations use base64. bytes content = 1; @@ -521,9 +539,8 @@ message RecognitionAudio { // Currently, only Google Cloud Storage URIs are // supported, which must be specified in the following format: // `gs://bucket_name/object_name` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - // For more information, see [Request - // URIs](https://cloud.google.com/storage/docs/reference-uris). + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](https://cloud.google.com/storage/docs/reference-uris). string uri = 2; } } @@ -532,7 +549,7 @@ message RecognitionAudio { // contains the result as zero or more sequential `SpeechRecognitionResult` // messages. message RecognizeResponse { - // Output only. Sequential list of transcription results corresponding to + // Sequential list of transcription results corresponding to // sequential portions of audio. repeated SpeechRecognitionResult results = 2; } @@ -543,7 +560,7 @@ message RecognizeResponse { // returned by the `GetOperation` call of the `google::longrunning::Operations` // service. message LongRunningRecognizeResponse { - // Output only. Sequential list of transcription results corresponding to + // Sequential list of transcription results corresponding to // sequential portions of audio. repeated SpeechRecognitionResult results = 2; } @@ -628,44 +645,44 @@ message StreamingRecognizeResponse { END_OF_SINGLE_UTTERANCE = 1; } - // Output only. If set, returns a [google.rpc.Status][google.rpc.Status] - // message that specifies the error for the operation. + // If set, returns a [google.rpc.Status][google.rpc.Status] message that + // specifies the error for the operation. google.rpc.Status error = 1; - // Output only. 
This repeated list contains zero or more results that + // This repeated list contains zero or more results that // correspond to consecutive portions of the audio currently being processed. // It contains zero or one `is_final=true` result (the newly settled portion), // followed by zero or more `is_final=false` results (the interim results). repeated StreamingRecognitionResult results = 2; - // Output only. Indicates the type of speech event. + // Indicates the type of speech event. SpeechEventType speech_event_type = 4; } // A streaming speech recognition result corresponding to a portion of the audio // that is currently being processed. message StreamingRecognitionResult { - // Output only. May contain one or more recognition hypotheses (up to the + // May contain one or more recognition hypotheses (up to the // maximum specified in `max_alternatives`). // These alternatives are ordered in terms of accuracy, with the top (first) // alternative being the most probable, as ranked by the recognizer. repeated SpeechRecognitionAlternative alternatives = 1; - // Output only. If `false`, this `StreamingRecognitionResult` represents an + // If `false`, this `StreamingRecognitionResult` represents an // interim result that may change. If `true`, this is the final time the // speech service will return this particular `StreamingRecognitionResult`, // the recognizer will not return any further hypotheses for this portion of // the transcript and corresponding audio. bool is_final = 2; - // Output only. An estimate of the likelihood that the recognizer will not + // An estimate of the likelihood that the recognizer will not // change its guess about this interim result. Values range from 0.0 // (completely unstable) to 1.0 (completely stable). // This field is only provided for interim results (`is_final=false`). // The default of 0.0 is a sentinel value indicating `stability` was not set. float stability = 3; - // Output only. 
Time offset of the end of this result relative to the + // Time offset of the end of this result relative to the // beginning of the audio. google.protobuf.Duration result_end_time = 4; @@ -674,16 +691,16 @@ message StreamingRecognitionResult { // For audio_channel_count = N, its output values can range from '1' to 'N'. int32 channel_tag = 5; - // Output only. The - // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the - // language in this result. This language code was detected to have the most - // likelihood of being spoken in the audio. - string language_code = 6; + // The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of + // the language in this result. This language code was detected to have the + // most likelihood of being spoken in the audio. + string language_code = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // A speech recognition result corresponding to a portion of the audio. message SpeechRecognitionResult { - // Output only. May contain one or more recognition hypotheses (up to the + // May contain one or more recognition hypotheses (up to the // maximum specified in `max_alternatives`). // These alternatives are ordered in terms of accuracy, with the top (first) // alternative being the most probable, as ranked by the recognizer. @@ -697,10 +714,10 @@ message SpeechRecognitionResult { // Alternative hypotheses (a.k.a. n-best list). message SpeechRecognitionAlternative { - // Output only. Transcript text representing the words that the user spoke. + // Transcript text representing the words that the user spoke. string transcript = 1; - // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // The confidence estimate between 0.0 and 1.0. A higher number // indicates an estimated greater likelihood that the recognized words are // correct. This field is set only for the top alternative of a non-streaming // result or, of a streaming result where `is_final=true`. 
@@ -709,7 +726,7 @@ message SpeechRecognitionAlternative { // The default of 0.0 is a sentinel value indicating `confidence` was not set. float confidence = 2; - // Output only. A list of word-specific information for each recognized word. + // A list of word-specific information for each recognized word. // Note: When `enable_speaker_diarization` is true, you will see all the words // from the beginning of the audio. repeated WordInfo words = 3; @@ -717,7 +734,7 @@ message SpeechRecognitionAlternative { // Word-specific information for recognized words. message WordInfo { - // Output only. Time offset relative to the beginning of the audio, + // Time offset relative to the beginning of the audio, // and corresponding to the start of the spoken word. // This field is only set if `enable_word_time_offsets=true` and only // in the top hypothesis. @@ -725,7 +742,7 @@ message WordInfo { // vary. google.protobuf.Duration start_time = 1; - // Output only. Time offset relative to the beginning of the audio, + // Time offset relative to the beginning of the audio, // and corresponding to the end of the spoken word. // This field is only set if `enable_word_time_offsets=true` and only // in the top hypothesis. @@ -733,6 +750,6 @@ message WordInfo { // vary. google.protobuf.Duration end_time = 2; - // Output only. The word corresponding to this set of information. + // The word corresponding to this set of information. 
string word = 3; } diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeRequest.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeRequest.java index 5c9fdc152..9b681a161 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeRequest.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeRequest.java @@ -138,11 +138,13 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return config_ != null; @@ -151,11 +153,13 @@ public boolean hasConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { return config_ == null @@ -166,11 +170,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrBuilder() { return getConfig(); @@ -182,10 +188,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrB * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasAudio() { return audio_ != null; @@ -194,10 +202,12 @@ public boolean hasAudio() { * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionAudio getAudio() { return audio_ == null @@ -208,10 +218,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionAudio getAudio() { * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionAudioOrBuilder getAudioOrBuilder() { return getAudio(); @@ -590,11 +602,13 @@ public Builder mergeFrom( * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return configBuilder_ != null || config_ != null; @@ -603,11 +617,13 @@ public boolean hasConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { if (configBuilder_ == null) { @@ -622,11 +638,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -645,11 +663,13 @@ public Builder setConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig val * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig( com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder builderForValue) { @@ -666,11 +686,13 @@ public Builder setConfig( * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder mergeConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -693,11 +715,13 @@ public Builder mergeConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig v * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder clearConfig() { if (configBuilder_ == null) { @@ -714,11 +738,13 @@ public Builder clearConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder getConfigBuilder() { @@ -729,11 +755,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder getConfigBuil * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrBuilder() { if (configBuilder_ != null) { @@ -748,11 +776,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrB * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1p1beta1.RecognitionConfig, @@ -781,10 +811,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrB * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasAudio() { return audioBuilder_ != null || audio_ != null; @@ -793,10 +825,12 @@ public boolean hasAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionAudio getAudio() { if (audioBuilder_ == null) { @@ -811,10 +845,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionAudio getAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio value) { if (audioBuilder_ == null) { @@ -833,10 +869,12 @@ public Builder setAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio value * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setAudio( com.google.cloud.speech.v1p1beta1.RecognitionAudio.Builder builderForValue) { @@ -853,10 +891,12 @@ public Builder setAudio( * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder mergeAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio value) { if (audioBuilder_ == null) { @@ -879,10 +919,12 @@ public Builder mergeAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio val * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder clearAudio() { if (audioBuilder_ == null) { @@ -899,10 +941,12 @@ public Builder clearAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionAudio.Builder getAudioBuilder() { @@ -913,10 +957,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionAudio.Builder getAudioBuilde * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionAudioOrBuilder getAudioOrBuilder() { if (audioBuilder_ != null) { @@ -931,10 +977,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionAudioOrBuilder getAudioOrBui * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1p1beta1.RecognitionAudio, diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeRequestOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeRequestOrBuilder.java index b28da6441..0038da09f 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeRequestOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeRequestOrBuilder.java @@ -27,33 +27,39 @@ public interface LongRunningRecognizeRequestOrBuilder * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ boolean hasConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrBuilder(); @@ -61,30 +67,36 @@ public interface LongRunningRecognizeRequestOrBuilder * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ boolean hasAudio(); /** * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1p1beta1.RecognitionAudio getAudio(); /** * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1p1beta1.RecognitionAudioOrBuilder getAudioOrBuilder(); } diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeResponse.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeResponse.java index 8e72b0fce..4d64aab3f 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeResponse.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeResponse.java @@ -126,7 +126,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -140,7 +140,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -155,7 +155,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -168,7 +168,7 @@ public int getResultsCount() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -181,7 +181,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult getResults(int * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -582,7 +582,7 @@ private void ensureResultsIsMutable() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -600,7 +600,7 @@ private void ensureResultsIsMutable() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -617,7 +617,7 @@ public int getResultsCount() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -634,7 +634,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult getResults(int * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -658,7 +658,7 @@ public Builder setResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -680,7 +680,7 @@ public Builder setResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -703,7 +703,7 @@ public Builder addResults(com.google.cloud.speech.v1p1beta1.SpeechRecognitionRes * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -727,7 +727,7 @@ public Builder addResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -748,7 +748,7 @@ public Builder addResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -770,7 +770,7 @@ public Builder addResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -792,7 +792,7 @@ public Builder addAllResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -812,7 +812,7 @@ public Builder clearResults() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -832,7 +832,7 @@ public Builder removeResults(int index) { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -846,7 +846,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult.Builder getResu * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -864,7 +864,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResultOrBuilder getRes * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -883,7 +883,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResultOrBuilder getRes * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -898,7 +898,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult.Builder addResu * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -915,7 +915,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult.Builder addResu * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeResponseOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeResponseOrBuilder.java index 1dc978b91..ac2f1e997 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeResponseOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/LongRunningRecognizeResponseOrBuilder.java @@ -27,7 +27,7 @@ public interface LongRunningRecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -38,7 +38,7 @@ public interface LongRunningRecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -49,7 +49,7 @@ public interface LongRunningRecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -60,7 +60,7 @@ public interface LongRunningRecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -72,7 +72,7 @@ public interface LongRunningRecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionAudio.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionAudio.java index b507e7062..da7da597a 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionAudio.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionAudio.java @@ -24,8 +24,8 @@ *
  * Contains audio data in the encoding specified in the `RecognitionConfig`.
  * Either `content` or `uri` must be supplied. Supplying both or neither
- * returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
- * See [content limits](/speech-to-text/quotas#content).
+ * returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
+ * [content limits](https://cloud.google.com/speech-to-text/quotas#content).
  * 
* * Protobuf type {@code google.cloud.speech.v1p1beta1.RecognitionAudio} @@ -159,7 +159,7 @@ public AudioSourceCase getAudioSourceCase() { * *
    * The audio data bytes encoded as specified in
-   * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+   * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
    * pure binary representation, whereas JSON representations use base64.
    * 
* @@ -182,9 +182,8 @@ public com.google.protobuf.ByteString getContent() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). *
* * string uri = 2; @@ -214,9 +213,8 @@ public java.lang.String getUri() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). *
* * string uri = 2; @@ -431,8 +429,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build *
    * Contains audio data in the encoding specified in the `RecognitionConfig`.
    * Either `content` or `uri` must be supplied. Supplying both or neither
-   * returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
-   * See [content limits](/speech-to-text/quotas#content).
+   * returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
+   * [content limits](https://cloud.google.com/speech-to-text/quotas#content).
    * 
* * Protobuf type {@code google.cloud.speech.v1p1beta1.RecognitionAudio} @@ -626,7 +624,7 @@ public Builder clearAudioSource() { * *
      * The audio data bytes encoded as specified in
-     * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+     * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
      * pure binary representation, whereas JSON representations use base64.
      * 
* @@ -643,7 +641,7 @@ public com.google.protobuf.ByteString getContent() { * *
      * The audio data bytes encoded as specified in
-     * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+     * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
      * pure binary representation, whereas JSON representations use base64.
      * 
* @@ -663,7 +661,7 @@ public Builder setContent(com.google.protobuf.ByteString value) { * *
      * The audio data bytes encoded as specified in
-     * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+     * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
      * pure binary representation, whereas JSON representations use base64.
      * 
* @@ -687,9 +685,8 @@ public Builder clearContent() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). *
* * string uri = 2; @@ -719,9 +716,8 @@ public java.lang.String getUri() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). *
* * string uri = 2; @@ -751,9 +747,8 @@ public com.google.protobuf.ByteString getUriBytes() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). *
* * string uri = 2; @@ -776,9 +771,8 @@ public Builder setUri(java.lang.String value) { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). *
* * string uri = 2; @@ -800,9 +794,8 @@ public Builder clearUri() { * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). *
* * string uri = 2; diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionAudioOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionAudioOrBuilder.java index 6bef6890d..045df4714 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionAudioOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionAudioOrBuilder.java @@ -28,7 +28,7 @@ public interface RecognitionAudioOrBuilder * *
    * The audio data bytes encoded as specified in
-   * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+   * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
    * pure binary representation, whereas JSON representations use base64.
    * 
* @@ -45,9 +45,8 @@ public interface RecognitionAudioOrBuilder * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). *
* * string uri = 2; @@ -62,9 +61,8 @@ public interface RecognitionAudioOrBuilder * Currently, only Google Cloud Storage URIs are * supported, which must be specified in the following format: * `gs://bucket_name/object_name` (other URI formats return - * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). - * For more information, see [Request - * URIs](https://cloud.google.com/storage/docs/reference-uris). + * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). *
* * string uri = 2; diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java index 6bc4c43ba..69e5e8c55 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java @@ -247,12 +247,14 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * The encoding of the audio data sent in the request.
-   * All encodings support only 1 channel (mono) audio.
+   * All encodings support only 1 channel (mono) audio, unless the
+   * `audio_channel_count` and `enable_separate_recognition_per_channel` fields
+   * are set.
    * For best results, the audio source should be captured and transmitted using
    * a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
    * recognition can be reduced if lossy codecs are used to capture or transmit
    * audio, particularly if background noise is present. Lossy codecs include
-   * `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`.
+   * `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`.
    * The `FLAC` and `WAV` audio file formats include a header that describes the
    * included audio content. You can request recognition for `WAV` files that
    * contain either `LINEAR16` or `MULAW` encoded audio.
@@ -262,8 +264,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    * an `AudioEncoding` when you send  send `FLAC` or `WAV` audio, the
    * encoding configuration must match the encoding described in the audio
    * header; otherwise the request returns an
-   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
-   * code.
+   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code.
    * 
* * Protobuf enum {@code google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding} @@ -589,8 +590,7 @@ private AudioEncoding(int value) { *
    * Encoding of audio data sent in all `RecognitionAudio` messages.
    * This field is optional for `FLAC` and `WAV` audio files and required
-   * for all other audio formats. For details, see
-   * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
    * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; @@ -604,8 +604,7 @@ public int getEncodingValue() { *
    * Encoding of audio data sent in all `RecognitionAudio` messages.
    * This field is optional for `FLAC` and `WAV` audio files and required
-   * for all other audio formats. For details, see
-   * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
    * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; @@ -630,9 +629,8 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding getEnco * 16000 is optimal. For best results, set the sampling rate of the audio * source to 16000 Hz. If that's not possible, use the native sample rate of * the audio source (instead of re-sampling). - * This field is optional for `FLAC` and `WAV` audio files and required - * for all other audio formats. For details, see - * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. + * This field is optional for FLAC and WAV audio files, but is + * required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. *
* * int32 sample_rate_hertz = 2; @@ -647,7 +645,7 @@ public int getSampleRateHertz() { * * *
-   * *Optional* The number of channels in the input audio data.
+   * The number of channels in the input audio data.
    * ONLY set this for MULTI-CHANNEL recognition.
    * Valid values for LINEAR16 and FLAC are `1`-`8`.
    * Valid values for OGG_OPUS are '1'-'254'.
@@ -670,7 +668,7 @@ public int getAudioChannelCount() {
    *
    *
    * 
-   * This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1
+   * This needs to be set to `true` explicitly and `audio_channel_count` > 1
    * to get each channel recognized separately. The recognition result will
    * contain a `channel_tag` field to state which channel that result belongs
    * to. If this is not true, we will only recognize the first channel. The
@@ -690,14 +688,15 @@ public boolean getEnableSeparateRecognitionPerChannel() {
    *
    *
    * 
-   * *Required* The language of the supplied audio as a
+   * Required. The language of the supplied audio as a
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    * Example: "en-US".
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes.
    * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; @@ -714,14 +713,15 @@ public java.lang.String getLanguageCode() { * * *
-   * *Required* The language of the supplied audio as a
+   * Required. The language of the supplied audio as a
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    * Example: "en-US".
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes.
    * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; @@ -741,17 +741,17 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() { * * *
-   * *Optional* A list of up to 3 additional
+   * A list of up to 3 additional
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
    * listing possible alternative languages of the supplied audio.
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
-   * If alternative languages are listed, recognition result will contain
-   * recognition in the most likely language detected including the main
-   * language_code. The recognition result will include the language tag
-   * of the language detected in the audio.
-   * Note: This feature is only supported for Voice Command and Voice Search
-   * use cases and performance may vary for other use cases (e.g., phone call
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes. If alternative languages are
+   * listed, recognition result will contain recognition in the most likely
+   * language detected including the main language_code. The recognition result
+   * will include the language tag of the language detected in the audio. Note:
+   * This feature is only supported for Voice Command and Voice Search use cases
+   * and performance may vary for other use cases (e.g., phone call
    * transcription).
    * 
* @@ -764,17 +764,17 @@ public com.google.protobuf.ProtocolStringList getAlternativeLanguageCodesList() * * *
-   * *Optional* A list of up to 3 additional
+   * A list of up to 3 additional
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
    * listing possible alternative languages of the supplied audio.
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
-   * If alternative languages are listed, recognition result will contain
-   * recognition in the most likely language detected including the main
-   * language_code. The recognition result will include the language tag
-   * of the language detected in the audio.
-   * Note: This feature is only supported for Voice Command and Voice Search
-   * use cases and performance may vary for other use cases (e.g., phone call
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes. If alternative languages are
+   * listed, recognition result will contain recognition in the most likely
+   * language detected including the main language_code. The recognition result
+   * will include the language tag of the language detected in the audio. Note:
+   * This feature is only supported for Voice Command and Voice Search use cases
+   * and performance may vary for other use cases (e.g., phone call
    * transcription).
    * 
* @@ -787,17 +787,17 @@ public int getAlternativeLanguageCodesCount() { * * *
-   * *Optional* A list of up to 3 additional
+   * A list of up to 3 additional
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
    * listing possible alternative languages of the supplied audio.
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
-   * If alternative languages are listed, recognition result will contain
-   * recognition in the most likely language detected including the main
-   * language_code. The recognition result will include the language tag
-   * of the language detected in the audio.
-   * Note: This feature is only supported for Voice Command and Voice Search
-   * use cases and performance may vary for other use cases (e.g., phone call
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes. If alternative languages are
+   * listed, recognition result will contain recognition in the most likely
+   * language detected including the main language_code. The recognition result
+   * will include the language tag of the language detected in the audio. Note:
+   * This feature is only supported for Voice Command and Voice Search use cases
+   * and performance may vary for other use cases (e.g., phone call
    * transcription).
    * 
* @@ -810,17 +810,17 @@ public java.lang.String getAlternativeLanguageCodes(int index) { * * *
-   * *Optional* A list of up to 3 additional
+   * A list of up to 3 additional
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
    * listing possible alternative languages of the supplied audio.
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
-   * If alternative languages are listed, recognition result will contain
-   * recognition in the most likely language detected including the main
-   * language_code. The recognition result will include the language tag
-   * of the language detected in the audio.
-   * Note: This feature is only supported for Voice Command and Voice Search
-   * use cases and performance may vary for other use cases (e.g., phone call
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes. If alternative languages are
+   * listed, recognition result will contain recognition in the most likely
+   * language detected including the main language_code. The recognition result
+   * will include the language tag of the language detected in the audio. Note:
+   * This feature is only supported for Voice Command and Voice Search use cases
+   * and performance may vary for other use cases (e.g., phone call
    * transcription).
    * 
* @@ -836,7 +836,7 @@ public com.google.protobuf.ByteString getAlternativeLanguageCodesBytes(int index * * *
-   * *Optional* Maximum number of recognition hypotheses to be returned.
+   * Maximum number of recognition hypotheses to be returned.
    * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
    * within each `SpeechRecognitionResult`.
    * The server may return fewer than `max_alternatives`.
@@ -856,7 +856,7 @@ public int getMaxAlternatives() {
    *
    *
    * 
-   * *Optional* If set to `true`, the server will attempt to filter out
+   * If set to `true`, the server will attempt to filter out
    * profanities, replacing all but the initial character in each filtered word
    * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
    * won't be filtered out.
@@ -874,10 +874,11 @@ public boolean getProfanityFilter() {
    *
    *
    * 
-   * *Optional* array of
-   * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-   * provide context to assist the speech recognition. For more information, see
-   * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+   * A means to provide context to assist the speech recognition. For more
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -889,10 +890,11 @@ public java.util.List getSpeech * * *
-   * *Optional* array of
-   * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-   * provide context to assist the speech recognition. For more information, see
-   * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+   * A means to provide context to assist the speech recognition. For more
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -905,10 +907,11 @@ public java.util.List getSpeech * * *
-   * *Optional* array of
-   * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-   * provide context to assist the speech recognition. For more information, see
-   * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+   * A means to provide context to assist the speech recognition. For more
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -920,10 +923,11 @@ public int getSpeechContextsCount() { * * *
-   * *Optional* array of
-   * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-   * provide context to assist the speech recognition. For more information, see
-   * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+   * A means to provide context to assist the speech recognition. For more
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -935,10 +939,11 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext getSpeechContexts(int ind * * *
-   * *Optional* array of
-   * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-   * provide context to assist the speech recognition. For more information, see
-   * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+   * A means to provide context to assist the speech recognition. For more
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -954,7 +959,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder getSpeechContext * * *
-   * *Optional* If `true`, the top result includes a list of words and
+   * If `true`, the top result includes a list of words and
    * the start and end time offsets (timestamps) for those words. If
    * `false`, no word-level time offset information is returned. The default is
    * `false`.
@@ -972,7 +977,7 @@ public boolean getEnableWordTimeOffsets() {
    *
    *
    * 
-   * *Optional* If `true`, the top result includes a list of words and the
+   * If `true`, the top result includes a list of words and the
    * confidence for those words. If `false`, no word-level confidence
    * information is returned. The default is `false`.
    * 
@@ -989,7 +994,7 @@ public boolean getEnableWordConfidence() { * * *
-   * *Optional* If 'true', adds punctuation to recognition result hypotheses.
+   * If 'true', adds punctuation to recognition result hypotheses.
    * This feature is only available in select languages. Setting this for
    * requests in other languages has no effect at all.
    * The default 'false' value does not add punctuation to result hypotheses.
@@ -1010,7 +1015,7 @@ public boolean getEnableAutomaticPunctuation() {
    *
    *
    * 
-   * *Optional* If 'true', enables speaker detection for each recognized word in
+   * If 'true', enables speaker detection for each recognized word in
    * the top alternative of the recognition result using a speaker_tag provided
    * in the WordInfo.
    * Note: Use diarization_config instead.
@@ -1029,7 +1034,6 @@ public boolean getEnableSpeakerDiarization() {
    *
    *
    * 
-   * *Optional*
    * If set, specifies the estimated number of speakers in the conversation.
    * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
    * Note: Use diarization_config instead.
@@ -1048,7 +1052,7 @@ public int getDiarizationSpeakerCount() {
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -1067,7 +1071,7 @@ public boolean hasDiarizationConfig() {
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -1088,7 +1092,7 @@ public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig getDiarization
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -1111,7 +1115,7 @@ public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig getDiarization
    *
    *
    * 
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -1123,7 +1127,7 @@ public boolean hasMetadata() { * * *
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -1137,7 +1141,7 @@ public com.google.cloud.speech.v1p1beta1.RecognitionMetadata getMetadata() { * * *
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -1152,7 +1156,7 @@ public com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder getMetadat * * *
-   * *Optional* Which model to select for the given request. Select the model
+   * Which model to select for the given request. Select the model
    * best suited to your domain to get best results. If a model is not
    * explicitly specified, then we auto-select a model based on the parameters
    * in the RecognitionConfig.
@@ -1203,7 +1207,7 @@ public java.lang.String getModel() {
    *
    *
    * 
-   * *Optional* Which model to select for the given request. Select the model
+   * Which model to select for the given request. Select the model
    * best suited to your domain to get best results. If a model is not
    * explicitly specified, then we auto-select a model based on the parameters
    * in the RecognitionConfig.
@@ -1257,7 +1261,7 @@ public com.google.protobuf.ByteString getModelBytes() {
    *
    *
    * 
-   * *Optional* Set to true to use an enhanced model for speech recognition.
+   * Set to true to use an enhanced model for speech recognition.
    * If `use_enhanced` is set to true and the `model` field is not set, then
    * an appropriate enhanced model is chosen if an enhanced model exists for
    * the audio.
@@ -1955,8 +1959,7 @@ public Builder mergeFrom(
      * 
      * Encoding of audio data sent in all `RecognitionAudio` messages.
      * This field is optional for `FLAC` and `WAV` audio files and required
-     * for all other audio formats. For details, see
-     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; @@ -1970,8 +1973,7 @@ public int getEncodingValue() { *
      * Encoding of audio data sent in all `RecognitionAudio` messages.
      * This field is optional for `FLAC` and `WAV` audio files and required
-     * for all other audio formats. For details, see
-     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; @@ -1987,8 +1989,7 @@ public Builder setEncodingValue(int value) { *
      * Encoding of audio data sent in all `RecognitionAudio` messages.
      * This field is optional for `FLAC` and `WAV` audio files and required
-     * for all other audio formats. For details, see
-     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; @@ -2007,8 +2008,7 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding getEnco *
      * Encoding of audio data sent in all `RecognitionAudio` messages.
      * This field is optional for `FLAC` and `WAV` audio files and required
-     * for all other audio formats. For details, see
-     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; @@ -2029,8 +2029,7 @@ public Builder setEncoding( *
      * Encoding of audio data sent in all `RecognitionAudio` messages.
      * This field is optional for `FLAC` and `WAV` audio files and required
-     * for all other audio formats. For details, see
-     * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; @@ -2052,9 +2051,8 @@ public Builder clearEncoding() { * 16000 is optimal. For best results, set the sampling rate of the audio * source to 16000 Hz. If that's not possible, use the native sample rate of * the audio source (instead of re-sampling). - * This field is optional for `FLAC` and `WAV` audio files and required - * for all other audio formats. For details, see - * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. + * This field is optional for FLAC and WAV audio files, but is + * required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. *
* * int32 sample_rate_hertz = 2; @@ -2071,9 +2069,8 @@ public int getSampleRateHertz() { * 16000 is optimal. For best results, set the sampling rate of the audio * source to 16000 Hz. If that's not possible, use the native sample rate of * the audio source (instead of re-sampling). - * This field is optional for `FLAC` and `WAV` audio files and required - * for all other audio formats. For details, see - * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. + * This field is optional for FLAC and WAV audio files, but is + * required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. *
* * int32 sample_rate_hertz = 2; @@ -2093,9 +2090,8 @@ public Builder setSampleRateHertz(int value) { * 16000 is optimal. For best results, set the sampling rate of the audio * source to 16000 Hz. If that's not possible, use the native sample rate of * the audio source (instead of re-sampling). - * This field is optional for `FLAC` and `WAV` audio files and required - * for all other audio formats. For details, see - * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. + * This field is optional for FLAC and WAV audio files, but is + * required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. *
* * int32 sample_rate_hertz = 2; @@ -2112,7 +2108,7 @@ public Builder clearSampleRateHertz() { * * *
-     * *Optional* The number of channels in the input audio data.
+     * The number of channels in the input audio data.
      * ONLY set this for MULTI-CHANNEL recognition.
      * Valid values for LINEAR16 and FLAC are `1`-`8`.
      * Valid values for OGG_OPUS are '1'-'254'.
@@ -2132,7 +2128,7 @@ public int getAudioChannelCount() {
      *
      *
      * 
-     * *Optional* The number of channels in the input audio data.
+     * The number of channels in the input audio data.
      * ONLY set this for MULTI-CHANNEL recognition.
      * Valid values for LINEAR16 and FLAC are `1`-`8`.
      * Valid values for OGG_OPUS are '1'-'254'.
@@ -2155,7 +2151,7 @@ public Builder setAudioChannelCount(int value) {
      *
      *
      * 
-     * *Optional* The number of channels in the input audio data.
+     * The number of channels in the input audio data.
      * ONLY set this for MULTI-CHANNEL recognition.
      * Valid values for LINEAR16 and FLAC are `1`-`8`.
      * Valid values for OGG_OPUS are '1'-'254'.
@@ -2180,7 +2176,7 @@ public Builder clearAudioChannelCount() {
      *
      *
      * 
-     * This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1
+     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
      * to get each channel recognized separately. The recognition result will
      * contain a `channel_tag` field to state which channel that result belongs
      * to. If this is not true, we will only recognize the first channel. The
@@ -2197,7 +2193,7 @@ public boolean getEnableSeparateRecognitionPerChannel() {
      *
      *
      * 
-     * This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1
+     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
      * to get each channel recognized separately. The recognition result will
      * contain a `channel_tag` field to state which channel that result belongs
      * to. If this is not true, we will only recognize the first channel. The
@@ -2217,7 +2213,7 @@ public Builder setEnableSeparateRecognitionPerChannel(boolean value) {
      *
      *
      * 
-     * This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1
+     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
      * to get each channel recognized separately. The recognition result will
      * contain a `channel_tag` field to state which channel that result belongs
      * to. If this is not true, we will only recognize the first channel. The
@@ -2239,14 +2235,15 @@ public Builder clearEnableSeparateRecognitionPerChannel() {
      *
      *
      * 
-     * *Required* The language of the supplied audio as a
+     * Required. The language of the supplied audio as a
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      * Example: "en-US".
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes.
      * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; @@ -2263,14 +2260,15 @@ public java.lang.String getLanguageCode() { * * *
-     * *Required* The language of the supplied audio as a
+     * Required. The language of the supplied audio as a
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      * Example: "en-US".
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes.
      * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; @@ -2287,14 +2285,15 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() { * * *
-     * *Required* The language of the supplied audio as a
+     * Required. The language of the supplied audio as a
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      * Example: "en-US".
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes.
      * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public Builder setLanguageCode(java.lang.String value) { if (value == null) { @@ -2309,14 +2308,15 @@ public Builder setLanguageCode(java.lang.String value) { * * *
-     * *Required* The language of the supplied audio as a
+     * Required. The language of the supplied audio as a
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      * Example: "en-US".
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes.
      * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public Builder clearLanguageCode() { @@ -2328,14 +2328,15 @@ public Builder clearLanguageCode() { * * *
-     * *Required* The language of the supplied audio as a
+     * Required. The language of the supplied audio as a
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      * Example: "en-US".
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes.
      * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -2362,17 +2363,17 @@ private void ensureAlternativeLanguageCodesIsMutable() { * * *
-     * *Optional* A list of up to 3 additional
+     * A list of up to 3 additional
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
      * listing possible alternative languages of the supplied audio.
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
-     * If alternative languages are listed, recognition result will contain
-     * recognition in the most likely language detected including the main
-     * language_code. The recognition result will include the language tag
-     * of the language detected in the audio.
-     * Note: This feature is only supported for Voice Command and Voice Search
-     * use cases and performance may vary for other use cases (e.g., phone call
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes. If alternative languages are
+     * listed, recognition result will contain recognition in the most likely
+     * language detected including the main language_code. The recognition result
+     * will include the language tag of the language detected in the audio. Note:
+     * This feature is only supported for Voice Command and Voice Search use cases
+     * and performance may vary for other use cases (e.g., phone call
      * transcription).
      * 
* @@ -2385,17 +2386,17 @@ public com.google.protobuf.ProtocolStringList getAlternativeLanguageCodesList() * * *
-     * *Optional* A list of up to 3 additional
+     * A list of up to 3 additional
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
      * listing possible alternative languages of the supplied audio.
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
-     * If alternative languages are listed, recognition result will contain
-     * recognition in the most likely language detected including the main
-     * language_code. The recognition result will include the language tag
-     * of the language detected in the audio.
-     * Note: This feature is only supported for Voice Command and Voice Search
-     * use cases and performance may vary for other use cases (e.g., phone call
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes. If alternative languages are
+     * listed, recognition result will contain recognition in the most likely
+     * language detected including the main language_code. The recognition result
+     * will include the language tag of the language detected in the audio. Note:
+     * This feature is only supported for Voice Command and Voice Search use cases
+     * and performance may vary for other use cases (e.g., phone call
      * transcription).
      * 
* @@ -2408,17 +2409,17 @@ public int getAlternativeLanguageCodesCount() { * * *
-     * *Optional* A list of up to 3 additional
+     * A list of up to 3 additional
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
      * listing possible alternative languages of the supplied audio.
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
-     * If alternative languages are listed, recognition result will contain
-     * recognition in the most likely language detected including the main
-     * language_code. The recognition result will include the language tag
-     * of the language detected in the audio.
-     * Note: This feature is only supported for Voice Command and Voice Search
-     * use cases and performance may vary for other use cases (e.g., phone call
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes. If alternative languages are
+     * listed, recognition result will contain recognition in the most likely
+     * language detected including the main language_code. The recognition result
+     * will include the language tag of the language detected in the audio. Note:
+     * This feature is only supported for Voice Command and Voice Search use cases
+     * and performance may vary for other use cases (e.g., phone call
      * transcription).
      * 
* @@ -2431,17 +2432,17 @@ public java.lang.String getAlternativeLanguageCodes(int index) { * * *
-     * *Optional* A list of up to 3 additional
+     * A list of up to 3 additional
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
      * listing possible alternative languages of the supplied audio.
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
-     * If alternative languages are listed, recognition result will contain
-     * recognition in the most likely language detected including the main
-     * language_code. The recognition result will include the language tag
-     * of the language detected in the audio.
-     * Note: This feature is only supported for Voice Command and Voice Search
-     * use cases and performance may vary for other use cases (e.g., phone call
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes. If alternative languages are
+     * listed, recognition result will contain recognition in the most likely
+     * language detected including the main language_code. The recognition result
+     * will include the language tag of the language detected in the audio. Note:
+     * This feature is only supported for Voice Command and Voice Search use cases
+     * and performance may vary for other use cases (e.g., phone call
      * transcription).
      * 
* @@ -2454,17 +2455,17 @@ public com.google.protobuf.ByteString getAlternativeLanguageCodesBytes(int index * * *
-     * *Optional* A list of up to 3 additional
+     * A list of up to 3 additional
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
      * listing possible alternative languages of the supplied audio.
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
-     * If alternative languages are listed, recognition result will contain
-     * recognition in the most likely language detected including the main
-     * language_code. The recognition result will include the language tag
-     * of the language detected in the audio.
-     * Note: This feature is only supported for Voice Command and Voice Search
-     * use cases and performance may vary for other use cases (e.g., phone call
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes. If alternative languages are
+     * listed, recognition result will contain recognition in the most likely
+     * language detected including the main language_code. The recognition result
+     * will include the language tag of the language detected in the audio. Note:
+     * This feature is only supported for Voice Command and Voice Search use cases
+     * and performance may vary for other use cases (e.g., phone call
      * transcription).
      * 
* @@ -2483,17 +2484,17 @@ public Builder setAlternativeLanguageCodes(int index, java.lang.String value) { * * *
-     * *Optional* A list of up to 3 additional
+     * A list of up to 3 additional
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
      * listing possible alternative languages of the supplied audio.
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
-     * If alternative languages are listed, recognition result will contain
-     * recognition in the most likely language detected including the main
-     * language_code. The recognition result will include the language tag
-     * of the language detected in the audio.
-     * Note: This feature is only supported for Voice Command and Voice Search
-     * use cases and performance may vary for other use cases (e.g., phone call
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes. If alternative languages are
+     * listed, recognition result will contain recognition in the most likely
+     * language detected including the main language_code. The recognition result
+     * will include the language tag of the language detected in the audio. Note:
+     * This feature is only supported for Voice Command and Voice Search use cases
+     * and performance may vary for other use cases (e.g., phone call
      * transcription).
      * 
* @@ -2512,17 +2513,17 @@ public Builder addAlternativeLanguageCodes(java.lang.String value) { * * *
-     * *Optional* A list of up to 3 additional
+     * A list of up to 3 additional
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
      * listing possible alternative languages of the supplied audio.
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
-     * If alternative languages are listed, recognition result will contain
-     * recognition in the most likely language detected including the main
-     * language_code. The recognition result will include the language tag
-     * of the language detected in the audio.
-     * Note: This feature is only supported for Voice Command and Voice Search
-     * use cases and performance may vary for other use cases (e.g., phone call
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes. If alternative languages are
+     * listed, recognition result will contain recognition in the most likely
+     * language detected including the main language_code. The recognition result
+     * will include the language tag of the language detected in the audio. Note:
+     * This feature is only supported for Voice Command and Voice Search use cases
+     * and performance may vary for other use cases (e.g., phone call
      * transcription).
      * 
* @@ -2538,17 +2539,17 @@ public Builder addAllAlternativeLanguageCodes(java.lang.Iterable - * *Optional* A list of up to 3 additional + * A list of up to 3 additional * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags, * listing possible alternative languages of the supplied audio. - * See [Language Support](/speech-to-text/docs/languages) - * for a list of the currently supported language codes. - * If alternative languages are listed, recognition result will contain - * recognition in the most likely language detected including the main - * language_code. The recognition result will include the language tag - * of the language detected in the audio. - * Note: This feature is only supported for Voice Command and Voice Search - * use cases and performance may vary for other use cases (e.g., phone call + * See [Language + * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list + * of the currently supported language codes. If alternative languages are + * listed, recognition result will contain recognition in the most likely + * language detected including the main language_code. The recognition result + * will include the language tag of the language detected in the audio. Note: + * This feature is only supported for Voice Command and Voice Search use cases + * and performance may vary for other use cases (e.g., phone call * transcription). *
* @@ -2564,17 +2565,17 @@ public Builder clearAlternativeLanguageCodes() { * * *
-     * *Optional* A list of up to 3 additional
+     * A list of up to 3 additional
      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
      * listing possible alternative languages of the supplied audio.
-     * See [Language Support](/speech-to-text/docs/languages)
-     * for a list of the currently supported language codes.
-     * If alternative languages are listed, recognition result will contain
-     * recognition in the most likely language detected including the main
-     * language_code. The recognition result will include the language tag
-     * of the language detected in the audio.
-     * Note: This feature is only supported for Voice Command and Voice Search
-     * use cases and performance may vary for other use cases (e.g., phone call
+     * See [Language
+     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+     * of the currently supported language codes. If alternative languages are
+     * listed, recognition result will contain recognition in the most likely
+     * language detected including the main language_code. The recognition result
+     * will include the language tag of the language detected in the audio. Note:
+     * This feature is only supported for Voice Command and Voice Search use cases
+     * and performance may vary for other use cases (e.g., phone call
      * transcription).
      * 
* @@ -2596,7 +2597,7 @@ public Builder addAlternativeLanguageCodesBytes(com.google.protobuf.ByteString v * * *
-     * *Optional* Maximum number of recognition hypotheses to be returned.
+     * Maximum number of recognition hypotheses to be returned.
      * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
      * within each `SpeechRecognitionResult`.
      * The server may return fewer than `max_alternatives`.
@@ -2613,7 +2614,7 @@ public int getMaxAlternatives() {
      *
      *
      * 
-     * *Optional* Maximum number of recognition hypotheses to be returned.
+     * Maximum number of recognition hypotheses to be returned.
      * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
      * within each `SpeechRecognitionResult`.
      * The server may return fewer than `max_alternatives`.
@@ -2633,7 +2634,7 @@ public Builder setMaxAlternatives(int value) {
      *
      *
      * 
-     * *Optional* Maximum number of recognition hypotheses to be returned.
+     * Maximum number of recognition hypotheses to be returned.
      * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
      * within each `SpeechRecognitionResult`.
      * The server may return fewer than `max_alternatives`.
@@ -2655,7 +2656,7 @@ public Builder clearMaxAlternatives() {
      *
      *
      * 
-     * *Optional* If set to `true`, the server will attempt to filter out
+     * If set to `true`, the server will attempt to filter out
      * profanities, replacing all but the initial character in each filtered word
      * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
      * won't be filtered out.
@@ -2670,7 +2671,7 @@ public boolean getProfanityFilter() {
      *
      *
      * 
-     * *Optional* If set to `true`, the server will attempt to filter out
+     * If set to `true`, the server will attempt to filter out
      * profanities, replacing all but the initial character in each filtered word
      * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
      * won't be filtered out.
@@ -2688,7 +2689,7 @@ public Builder setProfanityFilter(boolean value) {
      *
      *
      * 
-     * *Optional* If set to `true`, the server will attempt to filter out
+     * If set to `true`, the server will attempt to filter out
      * profanities, replacing all but the initial character in each filtered word
      * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
      * won't be filtered out.
@@ -2725,10 +2726,11 @@ private void ensureSpeechContextsIsMutable() {
      *
      *
      * 
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2744,10 +2746,11 @@ public java.util.List getSpeech * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2763,10 +2766,11 @@ public int getSpeechContextsCount() { * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2782,10 +2786,11 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext getSpeechContexts(int ind * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2808,10 +2813,11 @@ public Builder setSpeechContexts( * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2831,10 +2837,11 @@ public Builder setSpeechContexts( * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2856,10 +2863,11 @@ public Builder addSpeechContexts(com.google.cloud.speech.v1p1beta1.SpeechContext * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2882,10 +2890,11 @@ public Builder addSpeechContexts( * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2905,10 +2914,11 @@ public Builder addSpeechContexts( * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2928,10 +2938,11 @@ public Builder addSpeechContexts( * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2951,10 +2962,11 @@ public Builder addAllSpeechContexts( * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2973,10 +2985,11 @@ public Builder clearSpeechContexts() { * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -2995,10 +3008,11 @@ public Builder removeSpeechContexts(int index) { * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3011,10 +3025,11 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder getSpeechContexts * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3031,10 +3046,11 @@ public com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder getSpeechContext * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3051,10 +3067,11 @@ public com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder getSpeechContext * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3067,10 +3084,11 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder addSpeechContexts * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3084,10 +3102,11 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder addSpeechContexts * * *
-     * *Optional* array of
-     * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-     * provide context to assist the speech recognition. For more information, see
-     * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+     * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+     * A means to provide context to assist the speech recognition. For more
+     * information, see
+     * [speech
+     * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
      * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3122,7 +3141,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder addSpeechContexts * * *
-     * *Optional* If `true`, the top result includes a list of words and
+     * If `true`, the top result includes a list of words and
      * the start and end time offsets (timestamps) for those words. If
      * `false`, no word-level time offset information is returned. The default is
      * `false`.
@@ -3137,7 +3156,7 @@ public boolean getEnableWordTimeOffsets() {
      *
      *
      * 
-     * *Optional* If `true`, the top result includes a list of words and
+     * If `true`, the top result includes a list of words and
      * the start and end time offsets (timestamps) for those words. If
      * `false`, no word-level time offset information is returned. The default is
      * `false`.
@@ -3155,7 +3174,7 @@ public Builder setEnableWordTimeOffsets(boolean value) {
      *
      *
      * 
-     * *Optional* If `true`, the top result includes a list of words and
+     * If `true`, the top result includes a list of words and
      * the start and end time offsets (timestamps) for those words. If
      * `false`, no word-level time offset information is returned. The default is
      * `false`.
@@ -3175,7 +3194,7 @@ public Builder clearEnableWordTimeOffsets() {
      *
      *
      * 
-     * *Optional* If `true`, the top result includes a list of words and the
+     * If `true`, the top result includes a list of words and the
      * confidence for those words. If `false`, no word-level confidence
      * information is returned. The default is `false`.
      * 
@@ -3189,7 +3208,7 @@ public boolean getEnableWordConfidence() { * * *
-     * *Optional* If `true`, the top result includes a list of words and the
+     * If `true`, the top result includes a list of words and the
      * confidence for those words. If `false`, no word-level confidence
      * information is returned. The default is `false`.
      * 
@@ -3206,7 +3225,7 @@ public Builder setEnableWordConfidence(boolean value) { * * *
-     * *Optional* If `true`, the top result includes a list of words and the
+     * If `true`, the top result includes a list of words and the
      * confidence for those words. If `false`, no word-level confidence
      * information is returned. The default is `false`.
      * 
@@ -3225,7 +3244,7 @@ public Builder clearEnableWordConfidence() { * * *
-     * *Optional* If 'true', adds punctuation to recognition result hypotheses.
+     * If 'true', adds punctuation to recognition result hypotheses.
      * This feature is only available in select languages. Setting this for
      * requests in other languages has no effect at all.
      * The default 'false' value does not add punctuation to result hypotheses.
@@ -3243,7 +3262,7 @@ public boolean getEnableAutomaticPunctuation() {
      *
      *
      * 
-     * *Optional* If 'true', adds punctuation to recognition result hypotheses.
+     * If 'true', adds punctuation to recognition result hypotheses.
      * This feature is only available in select languages. Setting this for
      * requests in other languages has no effect at all.
      * The default 'false' value does not add punctuation to result hypotheses.
@@ -3264,7 +3283,7 @@ public Builder setEnableAutomaticPunctuation(boolean value) {
      *
      *
      * 
-     * *Optional* If 'true', adds punctuation to recognition result hypotheses.
+     * If 'true', adds punctuation to recognition result hypotheses.
      * This feature is only available in select languages. Setting this for
      * requests in other languages has no effect at all.
      * The default 'false' value does not add punctuation to result hypotheses.
@@ -3287,7 +3306,7 @@ public Builder clearEnableAutomaticPunctuation() {
      *
      *
      * 
-     * *Optional* If 'true', enables speaker detection for each recognized word in
+     * If 'true', enables speaker detection for each recognized word in
      * the top alternative of the recognition result using a speaker_tag provided
      * in the WordInfo.
      * Note: Use diarization_config instead.
@@ -3303,7 +3322,7 @@ public boolean getEnableSpeakerDiarization() {
      *
      *
      * 
-     * *Optional* If 'true', enables speaker detection for each recognized word in
+     * If 'true', enables speaker detection for each recognized word in
      * the top alternative of the recognition result using a speaker_tag provided
      * in the WordInfo.
      * Note: Use diarization_config instead.
@@ -3322,7 +3341,7 @@ public Builder setEnableSpeakerDiarization(boolean value) {
      *
      *
      * 
-     * *Optional* If 'true', enables speaker detection for each recognized word in
+     * If 'true', enables speaker detection for each recognized word in
      * the top alternative of the recognition result using a speaker_tag provided
      * in the WordInfo.
      * Note: Use diarization_config instead.
@@ -3343,7 +3362,6 @@ public Builder clearEnableSpeakerDiarization() {
      *
      *
      * 
-     * *Optional*
      * If set, specifies the estimated number of speakers in the conversation.
      * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
      * Note: Use diarization_config instead.
@@ -3359,7 +3377,6 @@ public int getDiarizationSpeakerCount() {
      *
      *
      * 
-     * *Optional*
      * If set, specifies the estimated number of speakers in the conversation.
      * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
      * Note: Use diarization_config instead.
@@ -3378,7 +3395,6 @@ public Builder setDiarizationSpeakerCount(int value) {
      *
      *
      * 
-     * *Optional*
      * If set, specifies the estimated number of speakers in the conversation.
      * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
      * Note: Use diarization_config instead.
@@ -3404,7 +3420,7 @@ public Builder clearDiarizationSpeakerCount() {
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -3423,7 +3439,7 @@ public boolean hasDiarizationConfig() {
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -3448,7 +3464,7 @@ public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig getDiarization
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -3478,7 +3494,7 @@ public Builder setDiarizationConfig(
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -3505,7 +3521,7 @@ public Builder setDiarizationConfig(
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -3540,7 +3556,7 @@ public Builder mergeDiarizationConfig(
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -3567,7 +3583,7 @@ public Builder clearDiarizationConfig() {
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -3589,7 +3605,7 @@ public Builder clearDiarizationConfig() {
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -3615,7 +3631,7 @@ public Builder clearDiarizationConfig() {
      *
      *
      * 
-     * *Optional* Config to enable speaker diarization and set additional
+     * Config to enable speaker diarization and set additional
      * parameters to make diarization better suited for your application.
      * Note: When this is enabled, we send all the words from the beginning of the
      * audio for the top alternative in every consecutive STREAMING responses.
@@ -3654,7 +3670,7 @@ public Builder clearDiarizationConfig() {
      *
      *
      * 
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -3666,7 +3682,7 @@ public boolean hasMetadata() { * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -3684,7 +3700,7 @@ public com.google.cloud.speech.v1p1beta1.RecognitionMetadata getMetadata() { * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -3706,7 +3722,7 @@ public Builder setMetadata(com.google.cloud.speech.v1p1beta1.RecognitionMetadata * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -3726,7 +3742,7 @@ public Builder setMetadata( * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -3752,7 +3768,7 @@ public Builder mergeMetadata(com.google.cloud.speech.v1p1beta1.RecognitionMetada * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -3772,7 +3788,7 @@ public Builder clearMetadata() { * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -3786,7 +3802,7 @@ public com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder getMetadata * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -3804,7 +3820,7 @@ public com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder getMetadat * * *
-     * *Optional* Metadata regarding this request.
+     * Metadata regarding this request.
      * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -3831,7 +3847,7 @@ public com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder getMetadat * * *
-     * *Optional* Which model to select for the given request. Select the model
+     * Which model to select for the given request. Select the model
      * best suited to your domain to get best results. If a model is not
      * explicitly specified, then we auto-select a model based on the parameters
      * in the RecognitionConfig.
@@ -3882,7 +3898,7 @@ public java.lang.String getModel() {
      *
      *
      * 
-     * *Optional* Which model to select for the given request. Select the model
+     * Which model to select for the given request. Select the model
      * best suited to your domain to get best results. If a model is not
      * explicitly specified, then we auto-select a model based on the parameters
      * in the RecognitionConfig.
@@ -3933,7 +3949,7 @@ public com.google.protobuf.ByteString getModelBytes() {
      *
      *
      * 
-     * *Optional* Which model to select for the given request. Select the model
+     * Which model to select for the given request. Select the model
      * best suited to your domain to get best results. If a model is not
      * explicitly specified, then we auto-select a model based on the parameters
      * in the RecognitionConfig.
@@ -3982,7 +3998,7 @@ public Builder setModel(java.lang.String value) {
      *
      *
      * 
-     * *Optional* Which model to select for the given request. Select the model
+     * Which model to select for the given request. Select the model
      * best suited to your domain to get best results. If a model is not
      * explicitly specified, then we auto-select a model based on the parameters
      * in the RecognitionConfig.
@@ -4028,7 +4044,7 @@ public Builder clearModel() {
      *
      *
      * 
-     * *Optional* Which model to select for the given request. Select the model
+     * Which model to select for the given request. Select the model
      * best suited to your domain to get best results. If a model is not
      * explicitly specified, then we auto-select a model based on the parameters
      * in the RecognitionConfig.
@@ -4080,7 +4096,7 @@ public Builder setModelBytes(com.google.protobuf.ByteString value) {
      *
      *
      * 
-     * *Optional* Set to true to use an enhanced model for speech recognition.
+     * Set to true to use an enhanced model for speech recognition.
      * If `use_enhanced` is set to true and the `model` field is not set, then
      * an appropriate enhanced model is chosen if an enhanced model exists for
      * the audio.
@@ -4098,7 +4114,7 @@ public boolean getUseEnhanced() {
      *
      *
      * 
-     * *Optional* Set to true to use an enhanced model for speech recognition.
+     * Set to true to use an enhanced model for speech recognition.
      * If `use_enhanced` is set to true and the `model` field is not set, then
      * an appropriate enhanced model is chosen if an enhanced model exists for
      * the audio.
@@ -4119,7 +4135,7 @@ public Builder setUseEnhanced(boolean value) {
      *
      *
      * 
-     * *Optional* Set to true to use an enhanced model for speech recognition.
+     * Set to true to use an enhanced model for speech recognition.
      * If `use_enhanced` is set to true and the `model` field is not set, then
      * an appropriate enhanced model is chosen if an enhanced model exists for
      * the audio.
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java
index 3663667af..4a8d762e3 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java
@@ -29,8 +29,7 @@ public interface RecognitionConfigOrBuilder
    * 
    * Encoding of audio data sent in all `RecognitionAudio` messages.
    * This field is optional for `FLAC` and `WAV` audio files and required
-   * for all other audio formats. For details, see
-   * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
    * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; @@ -42,8 +41,7 @@ public interface RecognitionConfigOrBuilder *
    * Encoding of audio data sent in all `RecognitionAudio` messages.
    * This field is optional for `FLAC` and `WAV` audio files and required
-   * for all other audio formats. For details, see
-   * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
    * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; @@ -59,9 +57,8 @@ public interface RecognitionConfigOrBuilder * 16000 is optimal. For best results, set the sampling rate of the audio * source to 16000 Hz. If that's not possible, use the native sample rate of * the audio source (instead of re-sampling). - * This field is optional for `FLAC` and `WAV` audio files and required - * for all other audio formats. For details, see - * [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. + * This field is optional for FLAC and WAV audio files, but is + * required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. *
* * int32 sample_rate_hertz = 2; @@ -72,7 +69,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* The number of channels in the input audio data.
+   * The number of channels in the input audio data.
    * ONLY set this for MULTI-CHANNEL recognition.
    * Valid values for LINEAR16 and FLAC are `1`-`8`.
    * Valid values for OGG_OPUS are '1'-'254'.
@@ -91,7 +88,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1
+   * This needs to be set to `true` explicitly and `audio_channel_count` > 1
    * to get each channel recognized separately. The recognition result will
    * contain a `channel_tag` field to state which channel that result belongs
    * to. If this is not true, we will only recognize the first channel. The
@@ -107,28 +104,30 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Required* The language of the supplied audio as a
+   * Required. The language of the supplied audio as a
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    * Example: "en-US".
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes.
    * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ java.lang.String getLanguageCode(); /** * * *
-   * *Required* The language of the supplied audio as a
+   * Required. The language of the supplied audio as a
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    * Example: "en-US".
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes.
    * 
* - * string language_code = 3; + * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; */ com.google.protobuf.ByteString getLanguageCodeBytes(); @@ -136,17 +135,17 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* A list of up to 3 additional
+   * A list of up to 3 additional
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
    * listing possible alternative languages of the supplied audio.
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
-   * If alternative languages are listed, recognition result will contain
-   * recognition in the most likely language detected including the main
-   * language_code. The recognition result will include the language tag
-   * of the language detected in the audio.
-   * Note: This feature is only supported for Voice Command and Voice Search
-   * use cases and performance may vary for other use cases (e.g., phone call
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes. If alternative languages are
+   * listed, recognition result will contain recognition in the most likely
+   * language detected including the main language_code. The recognition result
+   * will include the language tag of the language detected in the audio. Note:
+   * This feature is only supported for Voice Command and Voice Search use cases
+   * and performance may vary for other use cases (e.g., phone call
    * transcription).
    * 
* @@ -157,17 +156,17 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* A list of up to 3 additional
+   * A list of up to 3 additional
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
    * listing possible alternative languages of the supplied audio.
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
-   * If alternative languages are listed, recognition result will contain
-   * recognition in the most likely language detected including the main
-   * language_code. The recognition result will include the language tag
-   * of the language detected in the audio.
-   * Note: This feature is only supported for Voice Command and Voice Search
-   * use cases and performance may vary for other use cases (e.g., phone call
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes. If alternative languages are
+   * listed, recognition result will contain recognition in the most likely
+   * language detected including the main language_code. The recognition result
+   * will include the language tag of the language detected in the audio. Note:
+   * This feature is only supported for Voice Command and Voice Search use cases
+   * and performance may vary for other use cases (e.g., phone call
    * transcription).
    * 
* @@ -178,17 +177,17 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* A list of up to 3 additional
+   * A list of up to 3 additional
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
    * listing possible alternative languages of the supplied audio.
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
-   * If alternative languages are listed, recognition result will contain
-   * recognition in the most likely language detected including the main
-   * language_code. The recognition result will include the language tag
-   * of the language detected in the audio.
-   * Note: This feature is only supported for Voice Command and Voice Search
-   * use cases and performance may vary for other use cases (e.g., phone call
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes. If alternative languages are
+   * listed, recognition result will contain recognition in the most likely
+   * language detected including the main language_code. The recognition result
+   * will include the language tag of the language detected in the audio. Note:
+   * This feature is only supported for Voice Command and Voice Search use cases
+   * and performance may vary for other use cases (e.g., phone call
    * transcription).
    * 
* @@ -199,17 +198,17 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* A list of up to 3 additional
+   * A list of up to 3 additional
    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
    * listing possible alternative languages of the supplied audio.
-   * See [Language Support](/speech-to-text/docs/languages)
-   * for a list of the currently supported language codes.
-   * If alternative languages are listed, recognition result will contain
-   * recognition in the most likely language detected including the main
-   * language_code. The recognition result will include the language tag
-   * of the language detected in the audio.
-   * Note: This feature is only supported for Voice Command and Voice Search
-   * use cases and performance may vary for other use cases (e.g., phone call
+   * See [Language
+   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+   * of the currently supported language codes. If alternative languages are
+   * listed, recognition result will contain recognition in the most likely
+   * language detected including the main language_code. The recognition result
+   * will include the language tag of the language detected in the audio. Note:
+   * This feature is only supported for Voice Command and Voice Search use cases
+   * and performance may vary for other use cases (e.g., phone call
    * transcription).
    * 
* @@ -221,7 +220,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* Maximum number of recognition hypotheses to be returned.
+   * Maximum number of recognition hypotheses to be returned.
    * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
    * within each `SpeechRecognitionResult`.
    * The server may return fewer than `max_alternatives`.
@@ -237,7 +236,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* If set to `true`, the server will attempt to filter out
+   * If set to `true`, the server will attempt to filter out
    * profanities, replacing all but the initial character in each filtered word
    * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
    * won't be filtered out.
@@ -251,10 +250,11 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* array of
-   * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-   * provide context to assist the speech recognition. For more information, see
-   * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+   * A means to provide context to assist the speech recognition. For more
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -264,10 +264,11 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* array of
-   * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-   * provide context to assist the speech recognition. For more information, see
-   * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+   * A means to provide context to assist the speech recognition. For more
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -277,10 +278,11 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* array of
-   * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-   * provide context to assist the speech recognition. For more information, see
-   * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+   * A means to provide context to assist the speech recognition. For more
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -290,10 +292,11 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* array of
-   * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-   * provide context to assist the speech recognition. For more information, see
-   * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+   * A means to provide context to assist the speech recognition. For more
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -304,10 +307,11 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* array of
-   * [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-   * provide context to assist the speech recognition. For more information, see
-   * [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+   * Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+   * A means to provide context to assist the speech recognition. For more
+   * information, see
+   * [speech
+   * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
    * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -318,7 +322,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* If `true`, the top result includes a list of words and
+   * If `true`, the top result includes a list of words and
    * the start and end time offsets (timestamps) for those words. If
    * `false`, no word-level time offset information is returned. The default is
    * `false`.
@@ -332,7 +336,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* If `true`, the top result includes a list of words and the
+   * If `true`, the top result includes a list of words and the
    * confidence for those words. If `false`, no word-level confidence
    * information is returned. The default is `false`.
    * 
@@ -345,7 +349,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* If 'true', adds punctuation to recognition result hypotheses.
+   * If 'true', adds punctuation to recognition result hypotheses.
    * This feature is only available in select languages. Setting this for
    * requests in other languages has no effect at all.
    * The default 'false' value does not add punctuation to result hypotheses.
@@ -362,7 +366,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* If 'true', enables speaker detection for each recognized word in
+   * If 'true', enables speaker detection for each recognized word in
    * the top alternative of the recognition result using a speaker_tag provided
    * in the WordInfo.
    * Note: Use diarization_config instead.
@@ -377,7 +381,6 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional*
    * If set, specifies the estimated number of speakers in the conversation.
    * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
    * Note: Use diarization_config instead.
@@ -392,7 +395,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -409,7 +412,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -426,7 +429,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization and set additional
+   * Config to enable speaker diarization and set additional
    * parameters to make diarization better suited for your application.
    * Note: When this is enabled, we send all the words from the beginning of the
    * audio for the top alternative in every consecutive STREAMING responses.
@@ -445,7 +448,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -455,7 +458,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -465,7 +468,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* Metadata regarding this request.
+   * Metadata regarding this request.
    * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; @@ -476,7 +479,7 @@ public interface RecognitionConfigOrBuilder * * *
-   * *Optional* Which model to select for the given request. Select the model
+   * Which model to select for the given request. Select the model
    * best suited to your domain to get best results. If a model is not
    * explicitly specified, then we auto-select a model based on the parameters
    * in the RecognitionConfig.
@@ -517,7 +520,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Which model to select for the given request. Select the model
+   * Which model to select for the given request. Select the model
    * best suited to your domain to get best results. If a model is not
    * explicitly specified, then we auto-select a model based on the parameters
    * in the RecognitionConfig.
@@ -559,7 +562,7 @@ public interface RecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* Set to true to use an enhanced model for speech recognition.
+   * Set to true to use an enhanced model for speech recognition.
    * If `use_enhanced` is set to true and the `model` field is not set, then
    * an appropriate enhanced model is chosen if an enhanced model exists for
    * the audio.
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionMetadata.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionMetadata.java
index 89fb48ee2..92696d0b4 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionMetadata.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionMetadata.java
@@ -1298,8 +1298,9 @@ public com.google.protobuf.ByteString getOriginalMimeTypeBytes() {
    * unique users using the service.
    * 
* - * int64 obfuscated_id = 9; + * int64 obfuscated_id = 9 [deprecated = true]; */ + @java.lang.Deprecated public long getObfuscatedId() { return obfuscatedId_; } @@ -2453,8 +2454,9 @@ public Builder setOriginalMimeTypeBytes(com.google.protobuf.ByteString value) { * unique users using the service. *
* - * int64 obfuscated_id = 9; + * int64 obfuscated_id = 9 [deprecated = true]; */ + @java.lang.Deprecated public long getObfuscatedId() { return obfuscatedId_; } @@ -2466,8 +2468,9 @@ public long getObfuscatedId() { * unique users using the service. *
* - * int64 obfuscated_id = 9; + * int64 obfuscated_id = 9 [deprecated = true]; */ + @java.lang.Deprecated public Builder setObfuscatedId(long value) { obfuscatedId_ = value; @@ -2482,8 +2485,9 @@ public Builder setObfuscatedId(long value) { * unique users using the service. *
* - * int64 obfuscated_id = 9; + * int64 obfuscated_id = 9 [deprecated = true]; */ + @java.lang.Deprecated public Builder clearObfuscatedId() { obfuscatedId_ = 0L; diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionMetadataOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionMetadataOrBuilder.java index 7ee7914a3..cd8fb17ad 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionMetadataOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionMetadataOrBuilder.java @@ -196,8 +196,9 @@ public interface RecognitionMetadataOrBuilder * unique users using the service. *
* - * int64 obfuscated_id = 9; + * int64 obfuscated_id = 9 [deprecated = true]; */ + @java.lang.Deprecated long getObfuscatedId(); /** diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeRequest.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeRequest.java index 8de30b391..f626506fe 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeRequest.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeRequest.java @@ -137,11 +137,13 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return config_ != null; @@ -150,11 +152,13 @@ public boolean hasConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { return config_ == null @@ -165,11 +169,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrBuilder() { return getConfig(); @@ -181,10 +187,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrB * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasAudio() { return audio_ != null; @@ -193,10 +201,12 @@ public boolean hasAudio() { * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionAudio getAudio() { return audio_ == null @@ -207,10 +217,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionAudio getAudio() { * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionAudioOrBuilder getAudioOrBuilder() { return getAudio(); @@ -584,11 +596,13 @@ public Builder mergeFrom( * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return configBuilder_ != null || config_ != null; @@ -597,11 +611,13 @@ public boolean hasConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { if (configBuilder_ == null) { @@ -616,11 +632,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -639,11 +657,13 @@ public Builder setConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig val * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig( com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder builderForValue) { @@ -660,11 +680,13 @@ public Builder setConfig( * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder mergeConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -687,11 +709,13 @@ public Builder mergeConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig v * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder clearConfig() { if (configBuilder_ == null) { @@ -708,11 +732,13 @@ public Builder clearConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder getConfigBuilder() { @@ -723,11 +749,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder getConfigBuil * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrBuilder() { if (configBuilder_ != null) { @@ -742,11 +770,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrB * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1p1beta1.RecognitionConfig, @@ -775,10 +805,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrB * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasAudio() { return audioBuilder_ != null || audio_ != null; @@ -787,10 +819,12 @@ public boolean hasAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionAudio getAudio() { if (audioBuilder_ == null) { @@ -805,10 +839,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionAudio getAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio value) { if (audioBuilder_ == null) { @@ -827,10 +863,12 @@ public Builder setAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio value * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setAudio( com.google.cloud.speech.v1p1beta1.RecognitionAudio.Builder builderForValue) { @@ -847,10 +885,12 @@ public Builder setAudio( * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder mergeAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio value) { if (audioBuilder_ == null) { @@ -873,10 +913,12 @@ public Builder mergeAudio(com.google.cloud.speech.v1p1beta1.RecognitionAudio val * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder clearAudio() { if (audioBuilder_ == null) { @@ -893,10 +935,12 @@ public Builder clearAudio() { * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionAudio.Builder getAudioBuilder() { @@ -907,10 +951,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionAudio.Builder getAudioBuilde * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionAudioOrBuilder getAudioOrBuilder() { if (audioBuilder_ != null) { @@ -925,10 +971,12 @@ public com.google.cloud.speech.v1p1beta1.RecognitionAudioOrBuilder getAudioOrBui * * *
-     * *Required* The audio data to be recognized.
+     * Required. The audio data to be recognized.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1p1beta1.RecognitionAudio, diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeRequestOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeRequestOrBuilder.java index 39f3e8561..0ad867062 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeRequestOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeRequestOrBuilder.java @@ -27,33 +27,39 @@ public interface RecognizeRequestOrBuilder * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ boolean hasConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrBuilder(); @@ -61,30 +67,36 @@ public interface RecognizeRequestOrBuilder * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ boolean hasAudio(); /** * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1p1beta1.RecognitionAudio getAudio(); /** * * *
-   * *Required* The audio data to be recognized.
+   * Required. The audio data to be recognized.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2; + * + * .google.cloud.speech.v1p1beta1.RecognitionAudio audio = 2 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1p1beta1.RecognitionAudioOrBuilder getAudioOrBuilder(); } diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeResponse.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeResponse.java index 82bc3a594..5c670486b 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeResponse.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeResponse.java @@ -124,7 +124,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -138,7 +138,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -153,7 +153,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -166,7 +166,7 @@ public int getResultsCount() { * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -179,7 +179,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult getResults(int * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -574,7 +574,7 @@ private void ensureResultsIsMutable() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -592,7 +592,7 @@ private void ensureResultsIsMutable() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -609,7 +609,7 @@ public int getResultsCount() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -626,7 +626,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult getResults(int * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -650,7 +650,7 @@ public Builder setResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -672,7 +672,7 @@ public Builder setResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -695,7 +695,7 @@ public Builder addResults(com.google.cloud.speech.v1p1beta1.SpeechRecognitionRes * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -719,7 +719,7 @@ public Builder addResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -740,7 +740,7 @@ public Builder addResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -762,7 +762,7 @@ public Builder addResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -784,7 +784,7 @@ public Builder addAllResults( * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -804,7 +804,7 @@ public Builder clearResults() { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -824,7 +824,7 @@ public Builder removeResults(int index) { * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -838,7 +838,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult.Builder getResu * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -856,7 +856,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResultOrBuilder getRes * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -875,7 +875,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResultOrBuilder getRes * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -890,7 +890,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult.Builder addResu * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* @@ -907,7 +907,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult.Builder addResu * * *
-     * Output only. Sequential list of transcription results corresponding to
+     * Sequential list of transcription results corresponding to
      * sequential portions of audio.
      * 
* diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeResponseOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeResponseOrBuilder.java index bb540389d..dc1757c17 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeResponseOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognizeResponseOrBuilder.java @@ -27,7 +27,7 @@ public interface RecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -38,7 +38,7 @@ public interface RecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -49,7 +49,7 @@ public interface RecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -60,7 +60,7 @@ public interface RecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* @@ -72,7 +72,7 @@ public interface RecognizeResponseOrBuilder * * *
-   * Output only. Sequential list of transcription results corresponding to
+   * Sequential list of transcription results corresponding to
    * sequential portions of audio.
    * 
* diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfig.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfig.java index 31251600f..07751bdbf 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfig.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfig.java @@ -22,7 +22,7 @@ * * *
- * *Optional* Config to enable speaker diarization.
+ * Config to enable speaker diarization.
  * 
* * Protobuf type {@code google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig} @@ -118,7 +118,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * *Optional* If 'true', enables speaker detection for each recognized word in
+   * If 'true', enables speaker detection for each recognized word in
    * the top alternative of the recognition result using a speaker_tag provided
    * in the WordInfo.
    * 
@@ -135,7 +135,6 @@ public boolean getEnableSpeakerDiarization() { * * *
-   * *Optional*
    * Minimum number of speakers in the conversation. This range gives you more
    * flexibility by allowing the system to automatically determine the correct
    * number of speakers. If not set, the default value is 2.
@@ -153,7 +152,6 @@ public int getMinSpeakerCount() {
    *
    *
    * 
-   * *Optional*
    * Maximum number of speakers in the conversation. This range gives you more
    * flexibility by allowing the system to automatically determine the correct
    * number of speakers. If not set, the default value is 6.
@@ -347,7 +345,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
    *
    *
    * 
-   * *Optional* Config to enable speaker diarization.
+   * Config to enable speaker diarization.
    * 
* * Protobuf type {@code google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig} @@ -518,7 +516,7 @@ public Builder mergeFrom( * * *
-     * *Optional* If 'true', enables speaker detection for each recognized word in
+     * If 'true', enables speaker detection for each recognized word in
      * the top alternative of the recognition result using a speaker_tag provided
      * in the WordInfo.
      * 
@@ -532,7 +530,7 @@ public boolean getEnableSpeakerDiarization() { * * *
-     * *Optional* If 'true', enables speaker detection for each recognized word in
+     * If 'true', enables speaker detection for each recognized word in
      * the top alternative of the recognition result using a speaker_tag provided
      * in the WordInfo.
      * 
@@ -549,7 +547,7 @@ public Builder setEnableSpeakerDiarization(boolean value) { * * *
-     * *Optional* If 'true', enables speaker detection for each recognized word in
+     * If 'true', enables speaker detection for each recognized word in
      * the top alternative of the recognition result using a speaker_tag provided
      * in the WordInfo.
      * 
@@ -568,7 +566,6 @@ public Builder clearEnableSpeakerDiarization() { * * *
-     * *Optional*
      * Minimum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 2.
@@ -583,7 +580,6 @@ public int getMinSpeakerCount() {
      *
      *
      * 
-     * *Optional*
      * Minimum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 2.
@@ -601,7 +597,6 @@ public Builder setMinSpeakerCount(int value) {
      *
      *
      * 
-     * *Optional*
      * Minimum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 2.
@@ -621,7 +616,6 @@ public Builder clearMinSpeakerCount() {
      *
      *
      * 
-     * *Optional*
      * Maximum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 6.
@@ -636,7 +630,6 @@ public int getMaxSpeakerCount() {
      *
      *
      * 
-     * *Optional*
      * Maximum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 6.
@@ -654,7 +647,6 @@ public Builder setMaxSpeakerCount(int value) {
      *
      *
      * 
-     * *Optional*
      * Maximum number of speakers in the conversation. This range gives you more
      * flexibility by allowing the system to automatically determine the correct
      * number of speakers. If not set, the default value is 6.
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfigOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfigOrBuilder.java
index dce7de55a..bc63d9dc1 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfigOrBuilder.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfigOrBuilder.java
@@ -27,7 +27,7 @@ public interface SpeakerDiarizationConfigOrBuilder
    *
    *
    * 
-   * *Optional* If 'true', enables speaker detection for each recognized word in
+   * If 'true', enables speaker detection for each recognized word in
    * the top alternative of the recognition result using a speaker_tag provided
    * in the WordInfo.
    * 
@@ -40,7 +40,6 @@ public interface SpeakerDiarizationConfigOrBuilder * * *
-   * *Optional*
    * Minimum number of speakers in the conversation. This range gives you more
    * flexibility by allowing the system to automatically determine the correct
    * number of speakers. If not set, the default value is 2.
@@ -54,7 +53,6 @@ public interface SpeakerDiarizationConfigOrBuilder
    *
    *
    * 
-   * *Optional*
    * Maximum number of speakers in the conversation. This range gives you more
    * flexibility by allowing the system to automatically determine the correct
    * number of speakers. If not set, the default value is 6.
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechContext.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechContext.java
index 474c8478c..4c8b7df0e 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechContext.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechContext.java
@@ -125,12 +125,12 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
    * List items can also be set to classes for groups of words that represent
    * common concepts that occur in natural language. For example, rather than
    * providing phrase hints for every month of the year, using the $MONTH class
@@ -147,12 +147,12 @@ public com.google.protobuf.ProtocolStringList getPhrasesList() {
    *
    *
    * 
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
    * List items can also be set to classes for groups of words that represent
    * common concepts that occur in natural language. For example, rather than
    * providing phrase hints for every month of the year, using the $MONTH class
@@ -169,12 +169,12 @@ public int getPhrasesCount() {
    *
    *
    * 
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
    * List items can also be set to classes for groups of words that represent
    * common concepts that occur in natural language. For example, rather than
    * providing phrase hints for every month of the year, using the $MONTH class
@@ -191,12 +191,12 @@ public java.lang.String getPhrases(int index) {
    *
    *
    * 
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
    * List items can also be set to classes for groups of words that represent
    * common concepts that occur in natural language. For example, rather than
    * providing phrase hints for every month of the year, using the $MONTH class
@@ -601,12 +601,12 @@ private void ensurePhrasesIsMutable() {
      *
      *
      * 
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
      * List items can also be set to classes for groups of words that represent
      * common concepts that occur in natural language. For example, rather than
      * providing phrase hints for every month of the year, using the $MONTH class
@@ -623,12 +623,12 @@ public com.google.protobuf.ProtocolStringList getPhrasesList() {
      *
      *
      * 
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
      * List items can also be set to classes for groups of words that represent
      * common concepts that occur in natural language. For example, rather than
      * providing phrase hints for every month of the year, using the $MONTH class
@@ -645,12 +645,12 @@ public int getPhrasesCount() {
      *
      *
      * 
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
      * List items can also be set to classes for groups of words that represent
      * common concepts that occur in natural language. For example, rather than
      * providing phrase hints for every month of the year, using the $MONTH class
@@ -667,12 +667,12 @@ public java.lang.String getPhrases(int index) {
      *
      *
      * 
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
      * List items can also be set to classes for groups of words that represent
      * common concepts that occur in natural language. For example, rather than
      * providing phrase hints for every month of the year, using the $MONTH class
@@ -689,12 +689,12 @@ public com.google.protobuf.ByteString getPhrasesBytes(int index) {
      *
      *
      * 
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
      * List items can also be set to classes for groups of words that represent
      * common concepts that occur in natural language. For example, rather than
      * providing phrase hints for every month of the year, using the $MONTH class
@@ -717,12 +717,12 @@ public Builder setPhrases(int index, java.lang.String value) {
      *
      *
      * 
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
      * List items can also be set to classes for groups of words that represent
      * common concepts that occur in natural language. For example, rather than
      * providing phrase hints for every month of the year, using the $MONTH class
@@ -745,12 +745,12 @@ public Builder addPhrases(java.lang.String value) {
      *
      *
      * 
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
      * List items can also be set to classes for groups of words that represent
      * common concepts that occur in natural language. For example, rather than
      * providing phrase hints for every month of the year, using the $MONTH class
@@ -770,12 +770,12 @@ public Builder addAllPhrases(java.lang.Iterable values) {
      *
      *
      * 
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
      * List items can also be set to classes for groups of words that represent
      * common concepts that occur in natural language. For example, rather than
      * providing phrase hints for every month of the year, using the $MONTH class
@@ -795,12 +795,12 @@ public Builder clearPhrases() {
      *
      *
      * 
-     * *Optional* A list of strings containing words and phrases "hints" so that
+     * A list of strings containing words and phrases "hints" so that
      * the speech recognition is more likely to recognize them. This can be used
      * to improve the accuracy for specific words and phrases, for example, if
      * specific commands are typically spoken by the user. This can also be used
      * to add additional words to the vocabulary of the recognizer. See
-     * [usage limits](/speech-to-text/quotas#content).
+     * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
      * List items can also be set to classes for groups of words that represent
      * common concepts that occur in natural language. For example, rather than
      * providing phrase hints for every month of the year, using the $MONTH class
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechContextOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechContextOrBuilder.java
index a8a60017b..fd9132906 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechContextOrBuilder.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechContextOrBuilder.java
@@ -27,12 +27,12 @@ public interface SpeechContextOrBuilder
    *
    *
    * 
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
    * List items can also be set to classes for groups of words that represent
    * common concepts that occur in natural language. For example, rather than
    * providing phrase hints for every month of the year, using the $MONTH class
@@ -47,12 +47,12 @@ public interface SpeechContextOrBuilder
    *
    *
    * 
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
    * List items can also be set to classes for groups of words that represent
    * common concepts that occur in natural language. For example, rather than
    * providing phrase hints for every month of the year, using the $MONTH class
@@ -67,12 +67,12 @@ public interface SpeechContextOrBuilder
    *
    *
    * 
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
    * List items can also be set to classes for groups of words that represent
    * common concepts that occur in natural language. For example, rather than
    * providing phrase hints for every month of the year, using the $MONTH class
@@ -87,12 +87,12 @@ public interface SpeechContextOrBuilder
    *
    *
    * 
-   * *Optional* A list of strings containing words and phrases "hints" so that
+   * A list of strings containing words and phrases "hints" so that
    * the speech recognition is more likely to recognize them. This can be used
    * to improve the accuracy for specific words and phrases, for example, if
    * specific commands are typically spoken by the user. This can also be used
    * to add additional words to the vocabulary of the recognizer. See
-   * [usage limits](/speech-to-text/quotas#content).
+   * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
    * List items can also be set to classes for groups of words that represent
    * common concepts that occur in natural language. For example, rather than
    * providing phrase hints for every month of the year, using the $MONTH class
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java
index 9eea65e27..77e53775f 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java
@@ -106,134 +106,140 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
     java.lang.String[] descriptorData = {
       "\n0google/cloud/speech/v1p1beta1/cloud_sp"
           + "eech.proto\022\035google.cloud.speech.v1p1beta"
-          + "1\032\034google/api/annotations.proto\032#google/"
-          + "longrunning/operations.proto\032\031google/pro"
-          + "tobuf/any.proto\032\036google/protobuf/duratio"
-          + "n.proto\032\033google/protobuf/empty.proto\032\037go"
-          + "ogle/protobuf/timestamp.proto\032\027google/rp"
-          + "c/status.proto\"\224\001\n\020RecognizeRequest\022@\n\006c"
-          + "onfig\030\001 \001(\01320.google.cloud.speech.v1p1be"
-          + "ta1.RecognitionConfig\022>\n\005audio\030\002 \001(\0132/.g"
-          + "oogle.cloud.speech.v1p1beta1.Recognition"
-          + "Audio\"\237\001\n\033LongRunningRecognizeRequest\022@\n"
-          + "\006config\030\001 \001(\01320.google.cloud.speech.v1p1"
-          + "beta1.RecognitionConfig\022>\n\005audio\030\002 \001(\0132/"
+          + "1\032\034google/api/annotations.proto\032\027google/"
+          + "api/client.proto\032\037google/api/field_behav"
+          + "ior.proto\032#google/longrunning/operations"
+          + ".proto\032\031google/protobuf/any.proto\032\036googl"
+          + "e/protobuf/duration.proto\032\037google/protob"
+          + "uf/timestamp.proto\032\027google/rpc/status.pr"
+          + "oto\"\236\001\n\020RecognizeRequest\022E\n\006config\030\001 \001(\013"
+          + "20.google.cloud.speech.v1p1beta1.Recogni"
+          + "tionConfigB\003\340A\002\022C\n\005audio\030\002 \001(\0132/.google."
+          + "cloud.speech.v1p1beta1.RecognitionAudioB"
+          + "\003\340A\002\"\251\001\n\033LongRunningRecognizeRequest\022E\n\006"
+          + "config\030\001 \001(\01320.google.cloud.speech.v1p1b"
+          + "eta1.RecognitionConfigB\003\340A\002\022C\n\005audio\030\002 \001"
+          + "(\0132/.google.cloud.speech.v1p1beta1.Recog"
+          + "nitionAudioB\003\340A\002\"\240\001\n\031StreamingRecognizeR"
+          + "equest\022U\n\020streaming_config\030\001 \001(\01329.googl"
+          + "e.cloud.speech.v1p1beta1.StreamingRecogn"
+          + "itionConfigH\000\022\027\n\raudio_content\030\002 \001(\014H\000B\023"
+          + "\n\021streaming_request\"\226\001\n\032StreamingRecogni"
+          + "tionConfig\022E\n\006config\030\001 \001(\01320.google.clou"
+          + "d.speech.v1p1beta1.RecognitionConfigB\003\340A"
+          + "\002\022\030\n\020single_utterance\030\002 \001(\010\022\027\n\017interim_r"
+          + "esults\030\003 \001(\010\"\227\007\n\021RecognitionConfig\022P\n\010en"
+          + "coding\030\001 \001(\0162>.google.cloud.speech.v1p1b"
+          + "eta1.RecognitionConfig.AudioEncoding\022\031\n\021"
+          + "sample_rate_hertz\030\002 \001(\005\022\033\n\023audio_channel"
+          + "_count\030\007 \001(\005\022/\n\'enable_separate_recognit"
+          + "ion_per_channel\030\014 \001(\010\022\032\n\rlanguage_code\030\003"
+          + " \001(\tB\003\340A\002\022\"\n\032alternative_language_codes\030"
+          + "\022 \003(\t\022\030\n\020max_alternatives\030\004 \001(\005\022\030\n\020profa"
+          + "nity_filter\030\005 \001(\010\022E\n\017speech_contexts\030\006 \003"
+          + "(\0132,.google.cloud.speech.v1p1beta1.Speec"
+          + "hContext\022 \n\030enable_word_time_offsets\030\010 \001"
+          + "(\010\022\036\n\026enable_word_confidence\030\017 \001(\010\022$\n\034en"
+          + "able_automatic_punctuation\030\013 \001(\010\022&\n\032enab"
+          + "le_speaker_diarization\030\020 \001(\010B\002\030\001\022%\n\031diar"
+          + "ization_speaker_count\030\021 \001(\005B\002\030\001\022S\n\022diari"
+          + "zation_config\030\023 \001(\01327.google.cloud.speec"
+          + "h.v1p1beta1.SpeakerDiarizationConfig\022D\n\010"
+          + "metadata\030\t \001(\01322.google.cloud.speech.v1p"
+          + "1beta1.RecognitionMetadata\022\r\n\005model\030\r \001("
+          + "\t\022\024\n\014use_enhanced\030\016 \001(\010\"\224\001\n\rAudioEncodin"
+          + "g\022\030\n\024ENCODING_UNSPECIFIED\020\000\022\014\n\010LINEAR16\020"
+          + "\001\022\010\n\004FLAC\020\002\022\t\n\005MULAW\020\003\022\007\n\003AMR\020\004\022\n\n\006AMR_W"
+          + "B\020\005\022\014\n\010OGG_OPUS\020\006\022\032\n\026SPEEX_WITH_HEADER_B"
+          + "YTE\020\007\022\007\n\003MP3\020\010\"t\n\030SpeakerDiarizationConf"
+          + "ig\022\"\n\032enable_speaker_diarization\030\001 \001(\010\022\031"
+          + "\n\021min_speaker_count\030\002 \001(\005\022\031\n\021max_speaker"
+          + "_count\030\003 \001(\005\"\327\010\n\023RecognitionMetadata\022\\\n\020"
+          + "interaction_type\030\001 \001(\0162B.google.cloud.sp"
+          + "eech.v1p1beta1.RecognitionMetadata.Inter"
+          + "actionType\022$\n\034industry_naics_code_of_aud"
+          + "io\030\003 \001(\r\022b\n\023microphone_distance\030\004 \001(\0162E."
+          + "google.cloud.speech.v1p1beta1.Recognitio"
+          + "nMetadata.MicrophoneDistance\022a\n\023original"
+          + "_media_type\030\005 \001(\0162D.google.cloud.speech."
+          + "v1p1beta1.RecognitionMetadata.OriginalMe"
+          + "diaType\022e\n\025recording_device_type\030\006 \001(\0162F"
           + ".google.cloud.speech.v1p1beta1.Recogniti"
-          + "onAudio\"\240\001\n\031StreamingRecognizeRequest\022U\n"
-          + "\020streaming_config\030\001 \001(\01329.google.cloud.s"
-          + "peech.v1p1beta1.StreamingRecognitionConf"
-          + "igH\000\022\027\n\raudio_content\030\002 \001(\014H\000B\023\n\021streami"
-          + "ng_request\"\221\001\n\032StreamingRecognitionConfi"
-          + "g\022@\n\006config\030\001 \001(\01320.google.cloud.speech."
-          + "v1p1beta1.RecognitionConfig\022\030\n\020single_ut"
-          + "terance\030\002 \001(\010\022\027\n\017interim_results\030\003 \001(\010\"\222"
-          + "\007\n\021RecognitionConfig\022P\n\010encoding\030\001 \001(\0162>"
-          + ".google.cloud.speech.v1p1beta1.Recogniti"
-          + "onConfig.AudioEncoding\022\031\n\021sample_rate_he"
-          + "rtz\030\002 \001(\005\022\033\n\023audio_channel_count\030\007 \001(\005\022/"
-          + "\n\'enable_separate_recognition_per_channe"
-          + "l\030\014 \001(\010\022\025\n\rlanguage_code\030\003 \001(\t\022\"\n\032altern"
-          + "ative_language_codes\030\022 \003(\t\022\030\n\020max_altern"
-          + "atives\030\004 \001(\005\022\030\n\020profanity_filter\030\005 \001(\010\022E"
-          + "\n\017speech_contexts\030\006 \003(\0132,.google.cloud.s"
-          + "peech.v1p1beta1.SpeechContext\022 \n\030enable_"
-          + "word_time_offsets\030\010 \001(\010\022\036\n\026enable_word_c"
-          + "onfidence\030\017 \001(\010\022$\n\034enable_automatic_punc"
-          + "tuation\030\013 \001(\010\022&\n\032enable_speaker_diarizat"
-          + "ion\030\020 \001(\010B\002\030\001\022%\n\031diarization_speaker_cou"
-          + "nt\030\021 \001(\005B\002\030\001\022S\n\022diarization_config\030\023 \001(\013"
-          + "27.google.cloud.speech.v1p1beta1.Speaker"
-          + "DiarizationConfig\022D\n\010metadata\030\t \001(\01322.go"
-          + "ogle.cloud.speech.v1p1beta1.RecognitionM"
-          + "etadata\022\r\n\005model\030\r \001(\t\022\024\n\014use_enhanced\030\016"
-          + " \001(\010\"\224\001\n\rAudioEncoding\022\030\n\024ENCODING_UNSPE"
-          + "CIFIED\020\000\022\014\n\010LINEAR16\020\001\022\010\n\004FLAC\020\002\022\t\n\005MULA"
-          + "W\020\003\022\007\n\003AMR\020\004\022\n\n\006AMR_WB\020\005\022\014\n\010OGG_OPUS\020\006\022\032"
-          + "\n\026SPEEX_WITH_HEADER_BYTE\020\007\022\007\n\003MP3\020\010\"t\n\030S"
-          + "peakerDiarizationConfig\022\"\n\032enable_speake"
-          + "r_diarization\030\001 \001(\010\022\031\n\021min_speaker_count"
-          + "\030\002 \001(\005\022\031\n\021max_speaker_count\030\003 \001(\005\"\323\010\n\023Re"
-          + "cognitionMetadata\022\\\n\020interaction_type\030\001 "
-          + "\001(\0162B.google.cloud.speech.v1p1beta1.Reco"
-          + "gnitionMetadata.InteractionType\022$\n\034indus"
-          + "try_naics_code_of_audio\030\003 \001(\r\022b\n\023microph"
-          + "one_distance\030\004 \001(\0162E.google.cloud.speech"
-          + ".v1p1beta1.RecognitionMetadata.Microphon"
-          + "eDistance\022a\n\023original_media_type\030\005 \001(\0162D"
-          + ".google.cloud.speech.v1p1beta1.Recogniti"
-          + "onMetadata.OriginalMediaType\022e\n\025recordin"
-          + "g_device_type\030\006 \001(\0162F.google.cloud.speec"
-          + "h.v1p1beta1.RecognitionMetadata.Recordin"
-          + "gDeviceType\022\035\n\025recording_device_name\030\007 \001"
-          + "(\t\022\032\n\022original_mime_type\030\010 \001(\t\022\025\n\robfusc"
-          + "ated_id\030\t \001(\003\022\023\n\013audio_topic\030\n \001(\t\"\305\001\n\017I"
-          + "nteractionType\022 \n\034INTERACTION_TYPE_UNSPE"
-          + "CIFIED\020\000\022\016\n\nDISCUSSION\020\001\022\020\n\014PRESENTATION"
-          + "\020\002\022\016\n\nPHONE_CALL\020\003\022\r\n\tVOICEMAIL\020\004\022\033\n\027PRO"
-          + "FESSIONALLY_PRODUCED\020\005\022\020\n\014VOICE_SEARCH\020\006"
-          + "\022\021\n\rVOICE_COMMAND\020\007\022\r\n\tDICTATION\020\010\"d\n\022Mi"
-          + "crophoneDistance\022#\n\037MICROPHONE_DISTANCE_"
-          + "UNSPECIFIED\020\000\022\r\n\tNEARFIELD\020\001\022\014\n\010MIDFIELD"
-          + "\020\002\022\014\n\010FARFIELD\020\003\"N\n\021OriginalMediaType\022#\n"
-          + "\037ORIGINAL_MEDIA_TYPE_UNSPECIFIED\020\000\022\t\n\005AU"
-          + "DIO\020\001\022\t\n\005VIDEO\020\002\"\244\001\n\023RecordingDeviceType"
-          + "\022%\n!RECORDING_DEVICE_TYPE_UNSPECIFIED\020\000\022"
-          + "\016\n\nSMARTPHONE\020\001\022\006\n\002PC\020\002\022\016\n\nPHONE_LINE\020\003\022"
-          + "\013\n\007VEHICLE\020\004\022\030\n\024OTHER_OUTDOOR_DEVICE\020\005\022\027"
-          + "\n\023OTHER_INDOOR_DEVICE\020\006\"/\n\rSpeechContext"
-          + "\022\017\n\007phrases\030\001 \003(\t\022\r\n\005boost\030\004 \001(\002\"D\n\020Reco"
-          + "gnitionAudio\022\021\n\007content\030\001 \001(\014H\000\022\r\n\003uri\030\002"
-          + " \001(\tH\000B\016\n\014audio_source\"\\\n\021RecognizeRespo"
-          + "nse\022G\n\007results\030\002 \003(\01326.google.cloud.spee"
-          + "ch.v1p1beta1.SpeechRecognitionResult\"g\n\034"
-          + "LongRunningRecognizeResponse\022G\n\007results\030"
-          + "\002 \003(\01326.google.cloud.speech.v1p1beta1.Sp"
-          + "eechRecognitionResult\"\236\001\n\034LongRunningRec"
-          + "ognizeMetadata\022\030\n\020progress_percent\030\001 \001(\005"
-          + "\022.\n\nstart_time\030\002 \001(\0132\032.google.protobuf.T"
-          + "imestamp\0224\n\020last_update_time\030\003 \001(\0132\032.goo"
-          + "gle.protobuf.Timestamp\"\277\002\n\032StreamingReco"
-          + "gnizeResponse\022!\n\005error\030\001 \001(\0132\022.google.rp"
-          + "c.Status\022J\n\007results\030\002 \003(\01329.google.cloud"
-          + ".speech.v1p1beta1.StreamingRecognitionRe"
-          + "sult\022d\n\021speech_event_type\030\004 \001(\0162I.google"
-          + ".cloud.speech.v1p1beta1.StreamingRecogni"
-          + "zeResponse.SpeechEventType\"L\n\017SpeechEven"
-          + "tType\022\034\n\030SPEECH_EVENT_UNSPECIFIED\020\000\022\033\n\027E"
-          + "ND_OF_SINGLE_UTTERANCE\020\001\"\364\001\n\032StreamingRe"
-          + "cognitionResult\022Q\n\014alternatives\030\001 \003(\0132;."
-          + "google.cloud.speech.v1p1beta1.SpeechReco"
-          + "gnitionAlternative\022\020\n\010is_final\030\002 \001(\010\022\021\n\t"
-          + "stability\030\003 \001(\002\0222\n\017result_end_time\030\004 \001(\013"
-          + "2\031.google.protobuf.Duration\022\023\n\013channel_t"
-          + "ag\030\005 \001(\005\022\025\n\rlanguage_code\030\006 \001(\t\"\230\001\n\027Spee"
-          + "chRecognitionResult\022Q\n\014alternatives\030\001 \003("
-          + "\0132;.google.cloud.speech.v1p1beta1.Speech"
-          + "RecognitionAlternative\022\023\n\013channel_tag\030\002 "
-          + "\001(\005\022\025\n\rlanguage_code\030\005 \001(\t\"~\n\034SpeechReco"
-          + "gnitionAlternative\022\022\n\ntranscript\030\001 \001(\t\022\022"
-          + "\n\nconfidence\030\002 \001(\002\0226\n\005words\030\003 \003(\0132\'.goog"
-          + "le.cloud.speech.v1p1beta1.WordInfo\"\235\001\n\010W"
-          + "ordInfo\022-\n\nstart_time\030\001 \001(\0132\031.google.pro"
-          + "tobuf.Duration\022+\n\010end_time\030\002 \001(\0132\031.googl"
-          + "e.protobuf.Duration\022\014\n\004word\030\003 \001(\t\022\022\n\ncon"
-          + "fidence\030\004 \001(\002\022\023\n\013speaker_tag\030\005 \001(\0052\332\003\n\006S"
-          + "peech\022\226\001\n\tRecognize\022/.google.cloud.speec"
-          + "h.v1p1beta1.RecognizeRequest\0320.google.cl"
-          + "oud.speech.v1p1beta1.RecognizeResponse\"&"
-          + "\202\323\344\223\002 \"\033/v1p1beta1/speech:recognize:\001*\022\244"
-          + "\001\n\024LongRunningRecognize\022:.google.cloud.s"
-          + "peech.v1p1beta1.LongRunningRecognizeRequ"
-          + "est\032\035.google.longrunning.Operation\"1\202\323\344\223"
-          + "\002+\"&/v1p1beta1/speech:longrunningrecogni"
-          + "ze:\001*\022\217\001\n\022StreamingRecognize\0228.google.cl"
-          + "oud.speech.v1p1beta1.StreamingRecognizeR"
-          + "equest\0329.google.cloud.speech.v1p1beta1.S"
-          + "treamingRecognizeResponse\"\000(\0010\001Bz\n!com.g"
-          + "oogle.cloud.speech.v1p1beta1B\013SpeechProt"
-          + "oP\001ZCgoogle.golang.org/genproto/googleap"
-          + "is/cloud/speech/v1p1beta1;speech\370\001\001b\006pro"
-          + "to3"
+          + "onMetadata.RecordingDeviceType\022\035\n\025record"
+          + "ing_device_name\030\007 \001(\t\022\032\n\022original_mime_t"
+          + "ype\030\010 \001(\t\022\031\n\robfuscated_id\030\t \001(\003B\002\030\001\022\023\n\013"
+          + "audio_topic\030\n \001(\t\"\305\001\n\017InteractionType\022 \n"
+          + "\034INTERACTION_TYPE_UNSPECIFIED\020\000\022\016\n\nDISCU"
+          + "SSION\020\001\022\020\n\014PRESENTATION\020\002\022\016\n\nPHONE_CALL\020"
+          + "\003\022\r\n\tVOICEMAIL\020\004\022\033\n\027PROFESSIONALLY_PRODU"
+          + "CED\020\005\022\020\n\014VOICE_SEARCH\020\006\022\021\n\rVOICE_COMMAND"
+          + "\020\007\022\r\n\tDICTATION\020\010\"d\n\022MicrophoneDistance\022"
+          + "#\n\037MICROPHONE_DISTANCE_UNSPECIFIED\020\000\022\r\n\t"
+          + "NEARFIELD\020\001\022\014\n\010MIDFIELD\020\002\022\014\n\010FARFIELD\020\003\""
+          + "N\n\021OriginalMediaType\022#\n\037ORIGINAL_MEDIA_T"
+          + "YPE_UNSPECIFIED\020\000\022\t\n\005AUDIO\020\001\022\t\n\005VIDEO\020\002\""
+          + "\244\001\n\023RecordingDeviceType\022%\n!RECORDING_DEV"
+          + "ICE_TYPE_UNSPECIFIED\020\000\022\016\n\nSMARTPHONE\020\001\022\006"
+          + "\n\002PC\020\002\022\016\n\nPHONE_LINE\020\003\022\013\n\007VEHICLE\020\004\022\030\n\024O"
+          + "THER_OUTDOOR_DEVICE\020\005\022\027\n\023OTHER_INDOOR_DE"
+          + "VICE\020\006\"/\n\rSpeechContext\022\017\n\007phrases\030\001 \003(\t"
+          + "\022\r\n\005boost\030\004 \001(\002\"D\n\020RecognitionAudio\022\021\n\007c"
+          + "ontent\030\001 \001(\014H\000\022\r\n\003uri\030\002 \001(\tH\000B\016\n\014audio_s"
+          + "ource\"\\\n\021RecognizeResponse\022G\n\007results\030\002 "
+          + "\003(\01326.google.cloud.speech.v1p1beta1.Spee"
+          + "chRecognitionResult\"g\n\034LongRunningRecogn"
+          + "izeResponse\022G\n\007results\030\002 \003(\01326.google.cl"
+          + "oud.speech.v1p1beta1.SpeechRecognitionRe"
+          + "sult\"\236\001\n\034LongRunningRecognizeMetadata\022\030\n"
+          + "\020progress_percent\030\001 \001(\005\022.\n\nstart_time\030\002 "
+          + "\001(\0132\032.google.protobuf.Timestamp\0224\n\020last_"
+          + "update_time\030\003 \001(\0132\032.google.protobuf.Time"
+          + "stamp\"\277\002\n\032StreamingRecognizeResponse\022!\n\005"
+          + "error\030\001 \001(\0132\022.google.rpc.Status\022J\n\007resul"
+          + "ts\030\002 \003(\01329.google.cloud.speech.v1p1beta1"
+          + ".StreamingRecognitionResult\022d\n\021speech_ev"
+          + "ent_type\030\004 \001(\0162I.google.cloud.speech.v1p"
+          + "1beta1.StreamingRecognizeResponse.Speech"
+          + "EventType\"L\n\017SpeechEventType\022\034\n\030SPEECH_E"
+          + "VENT_UNSPECIFIED\020\000\022\033\n\027END_OF_SINGLE_UTTE"
+          + "RANCE\020\001\"\364\001\n\032StreamingRecognitionResult\022Q"
+          + "\n\014alternatives\030\001 \003(\0132;.google.cloud.spee"
+          + "ch.v1p1beta1.SpeechRecognitionAlternativ"
+          + "e\022\020\n\010is_final\030\002 \001(\010\022\021\n\tstability\030\003 \001(\002\0222"
+          + "\n\017result_end_time\030\004 \001(\0132\031.google.protobu"
+          + "f.Duration\022\023\n\013channel_tag\030\005 \001(\005\022\025\n\rlangu"
+          + "age_code\030\006 \001(\t\"\230\001\n\027SpeechRecognitionResu"
+          + "lt\022Q\n\014alternatives\030\001 \003(\0132;.google.cloud."
+          + "speech.v1p1beta1.SpeechRecognitionAltern"
+          + "ative\022\023\n\013channel_tag\030\002 \001(\005\022\025\n\rlanguage_c"
+          + "ode\030\005 \001(\t\"~\n\034SpeechRecognitionAlternativ"
+          + "e\022\022\n\ntranscript\030\001 \001(\t\022\022\n\nconfidence\030\002 \001("
+          + "\002\0226\n\005words\030\003 \003(\0132\'.google.cloud.speech.v"
+          + "1p1beta1.WordInfo\"\235\001\n\010WordInfo\022-\n\nstart_"
+          + "time\030\001 \001(\0132\031.google.protobuf.Duration\022+\n"
+          + "\010end_time\030\002 \001(\0132\031.google.protobuf.Durati"
+          + "on\022\014\n\004word\030\003 \001(\t\022\022\n\nconfidence\030\004 \001(\002\022\023\n\013"
+          + "speaker_tag\030\005 \001(\0052\202\005\n\006Speech\022\245\001\n\tRecogni"
+          + "ze\022/.google.cloud.speech.v1p1beta1.Recog"
+          + "nizeRequest\0320.google.cloud.speech.v1p1be"
+          + "ta1.RecognizeResponse\"5\202\323\344\223\002 \"\033/v1p1beta"
+          + "1/speech:recognize:\001*\332A\014config,audio\022\362\001\n"
+          + "\024LongRunningRecognize\022:.google.cloud.spe"
+          + "ech.v1p1beta1.LongRunningRecognizeReques"
+          + "t\032\035.google.longrunning.Operation\"\177\202\323\344\223\002+"
+          + "\"&/v1p1beta1/speech:longrunningrecognize"
+          + ":\001*\332A\014config,audio\312A<\n\034LongRunningRecogn"
+          + "izeResponse\022\034LongRunningRecognizeMetadat"
+          + "a\022\217\001\n\022StreamingRecognize\0228.google.cloud."
+          + "speech.v1p1beta1.StreamingRecognizeReque"
+          + "st\0329.google.cloud.speech.v1p1beta1.Strea"
+          + "mingRecognizeResponse\"\000(\0010\001\032I\312A\025speech.g"
+          + "oogleapis.com\322A.https://www.googleapis.c"
+          + "om/auth/cloud-platformB\200\001\n!com.google.cl"
+          + "oud.speech.v1p1beta1B\013SpeechProtoP\001ZCgoo"
+          + "gle.golang.org/genproto/googleapis/cloud"
+          + "/speech/v1p1beta1;speech\370\001\001\242\002\003GCSb\006proto"
+          + "3"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -247,10 +253,11 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
         descriptorData,
         new com.google.protobuf.Descriptors.FileDescriptor[] {
           com.google.api.AnnotationsProto.getDescriptor(),
+          com.google.api.ClientProto.getDescriptor(),
+          com.google.api.FieldBehaviorProto.getDescriptor(),
           com.google.longrunning.OperationsProto.getDescriptor(),
           com.google.protobuf.AnyProto.getDescriptor(),
           com.google.protobuf.DurationProto.getDescriptor(),
-          com.google.protobuf.EmptyProto.getDescriptor(),
           com.google.protobuf.TimestampProto.getDescriptor(),
           com.google.rpc.StatusProto.getDescriptor(),
         },
@@ -418,14 +425,20 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
             });
     com.google.protobuf.ExtensionRegistry registry =
         com.google.protobuf.ExtensionRegistry.newInstance();
+    registry.add(com.google.api.ClientProto.defaultHost);
+    registry.add(com.google.api.FieldBehaviorProto.fieldBehavior);
     registry.add(com.google.api.AnnotationsProto.http);
+    registry.add(com.google.api.ClientProto.methodSignature);
+    registry.add(com.google.api.ClientProto.oauthScopes);
+    registry.add(com.google.longrunning.OperationsProto.operationInfo);
     com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor(
         descriptor, registry);
     com.google.api.AnnotationsProto.getDescriptor();
+    com.google.api.ClientProto.getDescriptor();
+    com.google.api.FieldBehaviorProto.getDescriptor();
     com.google.longrunning.OperationsProto.getDescriptor();
     com.google.protobuf.AnyProto.getDescriptor();
     com.google.protobuf.DurationProto.getDescriptor();
-    com.google.protobuf.EmptyProto.getDescriptor();
     com.google.protobuf.TimestampProto.getDescriptor();
     com.google.rpc.StatusProto.getDescriptor();
   }
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionAlternative.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionAlternative.java
index b19f9e42d..c556c0953 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionAlternative.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionAlternative.java
@@ -133,7 +133,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. Transcript text representing the words that the user spoke.
+   * Transcript text representing the words that the user spoke.
    * 
* * string transcript = 1; @@ -153,7 +153,7 @@ public java.lang.String getTranscript() { * * *
-   * Output only. Transcript text representing the words that the user spoke.
+   * Transcript text representing the words that the user spoke.
    * 
* * string transcript = 1; @@ -176,7 +176,7 @@ public com.google.protobuf.ByteString getTranscriptBytes() { * * *
-   * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+   * The confidence estimate between 0.0 and 1.0. A higher number
    * indicates an estimated greater likelihood that the recognized words are
    * correct. This field is set only for the top alternative of a non-streaming
    * result or, of a streaming result where `is_final=true`.
@@ -197,7 +197,7 @@ public float getConfidence() {
    *
    *
    * 
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -211,7 +211,7 @@ public java.util.List getWordsList() * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -226,7 +226,7 @@ public java.util.List getWordsList() * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -240,7 +240,7 @@ public int getWordsCount() { * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -254,7 +254,7 @@ public com.google.cloud.speech.v1p1beta1.WordInfo getWords(int index) { * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -668,7 +668,7 @@ public Builder mergeFrom( * * *
-     * Output only. Transcript text representing the words that the user spoke.
+     * Transcript text representing the words that the user spoke.
      * 
* * string transcript = 1; @@ -688,7 +688,7 @@ public java.lang.String getTranscript() { * * *
-     * Output only. Transcript text representing the words that the user spoke.
+     * Transcript text representing the words that the user spoke.
      * 
* * string transcript = 1; @@ -708,7 +708,7 @@ public com.google.protobuf.ByteString getTranscriptBytes() { * * *
-     * Output only. Transcript text representing the words that the user spoke.
+     * Transcript text representing the words that the user spoke.
      * 
* * string transcript = 1; @@ -726,7 +726,7 @@ public Builder setTranscript(java.lang.String value) { * * *
-     * Output only. Transcript text representing the words that the user spoke.
+     * Transcript text representing the words that the user spoke.
      * 
* * string transcript = 1; @@ -741,7 +741,7 @@ public Builder clearTranscript() { * * *
-     * Output only. Transcript text representing the words that the user spoke.
+     * Transcript text representing the words that the user spoke.
      * 
* * string transcript = 1; @@ -762,7 +762,7 @@ public Builder setTranscriptBytes(com.google.protobuf.ByteString value) { * * *
-     * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+     * The confidence estimate between 0.0 and 1.0. A higher number
      * indicates an estimated greater likelihood that the recognized words are
      * correct. This field is set only for the top alternative of a non-streaming
      * result or, of a streaming result where `is_final=true`.
@@ -780,7 +780,7 @@ public float getConfidence() {
      *
      *
      * 
-     * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+     * The confidence estimate between 0.0 and 1.0. A higher number
      * indicates an estimated greater likelihood that the recognized words are
      * correct. This field is set only for the top alternative of a non-streaming
      * result or, of a streaming result where `is_final=true`.
@@ -801,7 +801,7 @@ public Builder setConfidence(float value) {
      *
      *
      * 
-     * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+     * The confidence estimate between 0.0 and 1.0. A higher number
      * indicates an estimated greater likelihood that the recognized words are
      * correct. This field is set only for the top alternative of a non-streaming
      * result or, of a streaming result where `is_final=true`.
@@ -839,7 +839,7 @@ private void ensureWordsIsMutable() {
      *
      *
      * 
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -857,7 +857,7 @@ public java.util.List getWordsList() * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -875,7 +875,7 @@ public int getWordsCount() { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -893,7 +893,7 @@ public com.google.cloud.speech.v1p1beta1.WordInfo getWords(int index) { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -917,7 +917,7 @@ public Builder setWords(int index, com.google.cloud.speech.v1p1beta1.WordInfo va * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -939,7 +939,7 @@ public Builder setWords( * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -963,7 +963,7 @@ public Builder addWords(com.google.cloud.speech.v1p1beta1.WordInfo value) { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -987,7 +987,7 @@ public Builder addWords(int index, com.google.cloud.speech.v1p1beta1.WordInfo va * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1008,7 +1008,7 @@ public Builder addWords(com.google.cloud.speech.v1p1beta1.WordInfo.Builder build * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1030,7 +1030,7 @@ public Builder addWords( * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1052,7 +1052,7 @@ public Builder addAllWords( * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1073,7 +1073,7 @@ public Builder clearWords() { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1094,7 +1094,7 @@ public Builder removeWords(int index) { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1108,7 +1108,7 @@ public com.google.cloud.speech.v1p1beta1.WordInfo.Builder getWordsBuilder(int in * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1126,7 +1126,7 @@ public com.google.cloud.speech.v1p1beta1.WordInfoOrBuilder getWordsOrBuilder(int * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1145,7 +1145,7 @@ public com.google.cloud.speech.v1p1beta1.WordInfoOrBuilder getWordsOrBuilder(int * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1160,7 +1160,7 @@ public com.google.cloud.speech.v1p1beta1.WordInfo.Builder addWordsBuilder() { * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
@@ -1175,7 +1175,7 @@ public com.google.cloud.speech.v1p1beta1.WordInfo.Builder addWordsBuilder(int in * * *
-     * Output only. A list of word-specific information for each recognized word.
+     * A list of word-specific information for each recognized word.
      * Note: When `enable_speaker_diarization` is true, you will see all the words
      * from the beginning of the audio.
      * 
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionAlternativeOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionAlternativeOrBuilder.java index 352241db2..80f3e4673 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionAlternativeOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionAlternativeOrBuilder.java @@ -27,7 +27,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. Transcript text representing the words that the user spoke.
+   * Transcript text representing the words that the user spoke.
    * 
* * string transcript = 1; @@ -37,7 +37,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. Transcript text representing the words that the user spoke.
+   * Transcript text representing the words that the user spoke.
    * 
* * string transcript = 1; @@ -48,7 +48,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+   * The confidence estimate between 0.0 and 1.0. A higher number
    * indicates an estimated greater likelihood that the recognized words are
    * correct. This field is set only for the top alternative of a non-streaming
    * result or, of a streaming result where `is_final=true`.
@@ -65,7 +65,7 @@ public interface SpeechRecognitionAlternativeOrBuilder
    *
    *
    * 
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -77,7 +77,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -89,7 +89,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -101,7 +101,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
@@ -114,7 +114,7 @@ public interface SpeechRecognitionAlternativeOrBuilder * * *
-   * Output only. A list of word-specific information for each recognized word.
+   * A list of word-specific information for each recognized word.
    * Note: When `enable_speaker_diarization` is true, you will see all the words
    * from the beginning of the audio.
    * 
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionResult.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionResult.java index 4823a1127..cd2bfa4a7 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionResult.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionResult.java @@ -137,7 +137,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -154,7 +154,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -172,7 +172,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -188,7 +188,7 @@ public int getAlternativesCount() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -204,7 +204,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative getAlterna
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -241,10 +241,9 @@ public int getChannelTag() {
    *
    *
    * 
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+   * of the language in this result. This language code was detected to have
+   * the most likelihood of being spoken in the audio.
    * 
* * string language_code = 5; @@ -264,10 +263,9 @@ public java.lang.String getLanguageCode() { * * *
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+   * of the language in this result. This language code was detected to have
+   * the most likelihood of being spoken in the audio.
    * 
* * string language_code = 5; @@ -700,7 +698,7 @@ private void ensureAlternativesIsMutable() { * * *
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -721,7 +719,7 @@ private void ensureAlternativesIsMutable() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -741,7 +739,7 @@ public int getAlternativesCount() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -762,7 +760,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative getAlterna
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -789,7 +787,7 @@ public Builder setAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -814,7 +812,7 @@ public Builder setAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -841,7 +839,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -868,7 +866,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -892,7 +890,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -917,7 +915,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -942,7 +940,7 @@ public Builder addAllAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -965,7 +963,7 @@ public Builder clearAlternatives() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -988,7 +986,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1005,7 +1003,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1026,7 +1024,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1048,7 +1046,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1067,7 +1065,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1087,7 +1085,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1173,10 +1171,9 @@ public Builder clearChannelTag() {
      *
      *
      * 
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+     * of the language in this result. This language code was detected to have
+     * the most likelihood of being spoken in the audio.
      * 
* * string language_code = 5; @@ -1196,10 +1193,9 @@ public java.lang.String getLanguageCode() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+     * of the language in this result. This language code was detected to have
+     * the most likelihood of being spoken in the audio.
      * 
* * string language_code = 5; @@ -1219,10 +1215,9 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+     * of the language in this result. This language code was detected to have
+     * the most likelihood of being spoken in the audio.
      * 
* * string language_code = 5; @@ -1240,10 +1235,9 @@ public Builder setLanguageCode(java.lang.String value) { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+     * of the language in this result. This language code was detected to have
+     * the most likelihood of being spoken in the audio.
      * 
* * string language_code = 5; @@ -1258,10 +1252,9 @@ public Builder clearLanguageCode() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+     * of the language in this result. This language code was detected to have
+     * the most likelihood of being spoken in the audio.
      * 
* * string language_code = 5; diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionResultOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionResultOrBuilder.java index 1b3a6e856..6f5ce919d 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionResultOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechRecognitionResultOrBuilder.java @@ -27,7 +27,7 @@ public interface SpeechRecognitionResultOrBuilder * * *
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -42,7 +42,7 @@ public interface SpeechRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -56,7 +56,7 @@ public interface SpeechRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -70,7 +70,7 @@ public interface SpeechRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -85,7 +85,7 @@ public interface SpeechRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -114,10 +114,9 @@ com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternativeOrBuilder getAlter
    *
    *
    * 
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+   * of the language in this result. This language code was detected to have
+   * the most likelihood of being spoken in the audio.
    * 
* * string language_code = 5; @@ -127,10 +126,9 @@ com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternativeOrBuilder getAlter * * *
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+   * of the language in this result. This language code was detected to have
+   * the most likelihood of being spoken in the audio.
    * 
* * string language_code = 5; diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionConfig.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionConfig.java index 2f013e6b4..209925ba6 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionConfig.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionConfig.java @@ -131,11 +131,13 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return config_ != null; @@ -144,11 +146,13 @@ public boolean hasConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { return config_ == null @@ -159,11 +163,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrBuilder() { return getConfig(); @@ -175,7 +181,7 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrB * * *
-   * *Optional* If `false` or omitted, the recognizer will perform continuous
+   * If `false` or omitted, the recognizer will perform continuous
    * recognition (continuing to wait for and process audio even if the user
    * pauses speaking) until the client closes the input stream (gRPC API) or
    * until the maximum time limit has been reached. May return multiple
@@ -199,7 +205,7 @@ public boolean getSingleUtterance() {
    *
    *
    * 
-   * *Optional* If `true`, interim results (tentative hypotheses) may be
+   * If `true`, interim results (tentative hypotheses) may be
    * returned as they become available (these interim results are indicated with
    * the `is_final=false` flag).
    * If `false` or omitted, only `is_final=true` result(s) are returned.
@@ -585,11 +591,13 @@ public Builder mergeFrom(
      *
      *
      * 
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public boolean hasConfig() { return configBuilder_ != null || config_ != null; @@ -598,11 +606,13 @@ public boolean hasConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { if (configBuilder_ == null) { @@ -617,11 +627,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -640,11 +652,13 @@ public Builder setConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig val * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder setConfig( com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder builderForValue) { @@ -661,11 +675,13 @@ public Builder setConfig( * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder mergeConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig value) { if (configBuilder_ == null) { @@ -688,11 +704,13 @@ public Builder mergeConfig(com.google.cloud.speech.v1p1beta1.RecognitionConfig v * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public Builder clearConfig() { if (configBuilder_ == null) { @@ -709,11 +727,13 @@ public Builder clearConfig() { * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder getConfigBuilder() { @@ -724,11 +744,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder getConfigBuil * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrBuilder() { if (configBuilder_ != null) { @@ -743,11 +765,13 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrB * * *
-     * *Required* Provides information to the recognizer that specifies how to
+     * Required. Provides information to the recognizer that specifies how to
      * process the request.
      * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1p1beta1.RecognitionConfig, @@ -771,7 +795,7 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrB * * *
-     * *Optional* If `false` or omitted, the recognizer will perform continuous
+     * If `false` or omitted, the recognizer will perform continuous
      * recognition (continuing to wait for and process audio even if the user
      * pauses speaking) until the client closes the input stream (gRPC API) or
      * until the maximum time limit has been reached. May return multiple
@@ -792,7 +816,7 @@ public boolean getSingleUtterance() {
      *
      *
      * 
-     * *Optional* If `false` or omitted, the recognizer will perform continuous
+     * If `false` or omitted, the recognizer will perform continuous
      * recognition (continuing to wait for and process audio even if the user
      * pauses speaking) until the client closes the input stream (gRPC API) or
      * until the maximum time limit has been reached. May return multiple
@@ -816,7 +840,7 @@ public Builder setSingleUtterance(boolean value) {
      *
      *
      * 
-     * *Optional* If `false` or omitted, the recognizer will perform continuous
+     * If `false` or omitted, the recognizer will perform continuous
      * recognition (continuing to wait for and process audio even if the user
      * pauses speaking) until the client closes the input stream (gRPC API) or
      * until the maximum time limit has been reached. May return multiple
@@ -842,7 +866,7 @@ public Builder clearSingleUtterance() {
      *
      *
      * 
-     * *Optional* If `true`, interim results (tentative hypotheses) may be
+     * If `true`, interim results (tentative hypotheses) may be
      * returned as they become available (these interim results are indicated with
      * the `is_final=false` flag).
      * If `false` or omitted, only `is_final=true` result(s) are returned.
@@ -857,7 +881,7 @@ public boolean getInterimResults() {
      *
      *
      * 
-     * *Optional* If `true`, interim results (tentative hypotheses) may be
+     * If `true`, interim results (tentative hypotheses) may be
      * returned as they become available (these interim results are indicated with
      * the `is_final=false` flag).
      * If `false` or omitted, only `is_final=true` result(s) are returned.
@@ -875,7 +899,7 @@ public Builder setInterimResults(boolean value) {
      *
      *
      * 
-     * *Optional* If `true`, interim results (tentative hypotheses) may be
+     * If `true`, interim results (tentative hypotheses) may be
      * returned as they become available (these interim results are indicated with
      * the `is_final=false` flag).
      * If `false` or omitted, only `is_final=true` result(s) are returned.
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionConfigOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionConfigOrBuilder.java
index 6bf07bf7e..8b47cc16f 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionConfigOrBuilder.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionConfigOrBuilder.java
@@ -27,33 +27,39 @@ public interface StreamingRecognitionConfigOrBuilder
    *
    *
    * 
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ boolean hasConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1p1beta1.RecognitionConfig getConfig(); /** * * *
-   * *Required* Provides information to the recognizer that specifies how to
+   * Required. Provides information to the recognizer that specifies how to
    * process the request.
    * 
* - * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1; + * + * .google.cloud.speech.v1p1beta1.RecognitionConfig config = 1 [(.google.api.field_behavior) = REQUIRED]; + * */ com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder getConfigOrBuilder(); @@ -61,7 +67,7 @@ public interface StreamingRecognitionConfigOrBuilder * * *
-   * *Optional* If `false` or omitted, the recognizer will perform continuous
+   * If `false` or omitted, the recognizer will perform continuous
    * recognition (continuing to wait for and process audio even if the user
    * pauses speaking) until the client closes the input stream (gRPC API) or
    * until the maximum time limit has been reached. May return multiple
@@ -81,7 +87,7 @@ public interface StreamingRecognitionConfigOrBuilder
    *
    *
    * 
-   * *Optional* If `true`, interim results (tentative hypotheses) may be
+   * If `true`, interim results (tentative hypotheses) may be
    * returned as they become available (these interim results are indicated with
    * the `is_final=false` flag).
    * If `false` or omitted, only `is_final=true` result(s) are returned.
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionResult.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionResult.java
index b3ea8fed8..abf1cec71 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionResult.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionResult.java
@@ -163,7 +163,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -180,7 +180,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -198,7 +198,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -214,7 +214,7 @@ public int getAlternativesCount() {
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -230,7 +230,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative getAlterna
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -250,7 +250,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative getAlterna
    *
    *
    * 
-   * Output only. If `false`, this `StreamingRecognitionResult` represents an
+   * If `false`, this `StreamingRecognitionResult` represents an
    * interim result that may change. If `true`, this is the final time the
    * speech service will return this particular `StreamingRecognitionResult`,
    * the recognizer will not return any further hypotheses for this portion of
@@ -269,7 +269,7 @@ public boolean getIsFinal() {
    *
    *
    * 
-   * Output only. An estimate of the likelihood that the recognizer will not
+   * An estimate of the likelihood that the recognizer will not
    * change its guess about this interim result. Values range from 0.0
    * (completely unstable) to 1.0 (completely stable).
    * This field is only provided for interim results (`is_final=false`).
@@ -288,7 +288,7 @@ public float getStability() {
    *
    *
    * 
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -301,7 +301,7 @@ public boolean hasResultEndTime() { * * *
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -316,7 +316,7 @@ public com.google.protobuf.Duration getResultEndTime() { * * *
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -349,10 +349,9 @@ public int getChannelTag() { * * *
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+   * of the language in this result. This language code was detected to have
+   * the most likelihood of being spoken in the audio.
    * 
* * string language_code = 6; @@ -372,10 +371,9 @@ public java.lang.String getLanguageCode() { * * *
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+   * of the language in this result. This language code was detected to have
+   * the most likelihood of being spoken in the audio.
    * 
* * string language_code = 6; @@ -870,7 +868,7 @@ private void ensureAlternativesIsMutable() { * * *
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -891,7 +889,7 @@ private void ensureAlternativesIsMutable() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -911,7 +909,7 @@ public int getAlternativesCount() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -932,7 +930,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative getAlterna
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -959,7 +957,7 @@ public Builder setAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -984,7 +982,7 @@ public Builder setAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1011,7 +1009,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1038,7 +1036,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1062,7 +1060,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1087,7 +1085,7 @@ public Builder addAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1112,7 +1110,7 @@ public Builder addAllAlternatives(
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1135,7 +1133,7 @@ public Builder clearAlternatives() {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1158,7 +1156,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1175,7 +1173,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1196,7 +1194,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1218,7 +1216,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1237,7 +1235,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1257,7 +1255,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. May contain one or more recognition hypotheses (up to the
+     * May contain one or more recognition hypotheses (up to the
      * maximum specified in `max_alternatives`).
      * These alternatives are ordered in terms of accuracy, with the top (first)
      * alternative being the most probable, as ranked by the recognizer.
@@ -1293,7 +1291,7 @@ public Builder removeAlternatives(int index) {
      *
      *
      * 
-     * Output only. If `false`, this `StreamingRecognitionResult` represents an
+     * If `false`, this `StreamingRecognitionResult` represents an
      * interim result that may change. If `true`, this is the final time the
      * speech service will return this particular `StreamingRecognitionResult`,
      * the recognizer will not return any further hypotheses for this portion of
@@ -1309,7 +1307,7 @@ public boolean getIsFinal() {
      *
      *
      * 
-     * Output only. If `false`, this `StreamingRecognitionResult` represents an
+     * If `false`, this `StreamingRecognitionResult` represents an
      * interim result that may change. If `true`, this is the final time the
      * speech service will return this particular `StreamingRecognitionResult`,
      * the recognizer will not return any further hypotheses for this portion of
@@ -1328,7 +1326,7 @@ public Builder setIsFinal(boolean value) {
      *
      *
      * 
-     * Output only. If `false`, this `StreamingRecognitionResult` represents an
+     * If `false`, this `StreamingRecognitionResult` represents an
      * interim result that may change. If `true`, this is the final time the
      * speech service will return this particular `StreamingRecognitionResult`,
      * the recognizer will not return any further hypotheses for this portion of
@@ -1349,7 +1347,7 @@ public Builder clearIsFinal() {
      *
      *
      * 
-     * Output only. An estimate of the likelihood that the recognizer will not
+     * An estimate of the likelihood that the recognizer will not
      * change its guess about this interim result. Values range from 0.0
      * (completely unstable) to 1.0 (completely stable).
      * This field is only provided for interim results (`is_final=false`).
@@ -1365,7 +1363,7 @@ public float getStability() {
      *
      *
      * 
-     * Output only. An estimate of the likelihood that the recognizer will not
+     * An estimate of the likelihood that the recognizer will not
      * change its guess about this interim result. Values range from 0.0
      * (completely unstable) to 1.0 (completely stable).
      * This field is only provided for interim results (`is_final=false`).
@@ -1384,7 +1382,7 @@ public Builder setStability(float value) {
      *
      *
      * 
-     * Output only. An estimate of the likelihood that the recognizer will not
+     * An estimate of the likelihood that the recognizer will not
      * change its guess about this interim result. Values range from 0.0
      * (completely unstable) to 1.0 (completely stable).
      * This field is only provided for interim results (`is_final=false`).
@@ -1410,7 +1408,7 @@ public Builder clearStability() {
      *
      *
      * 
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1423,7 +1421,7 @@ public boolean hasResultEndTime() { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1442,7 +1440,7 @@ public com.google.protobuf.Duration getResultEndTime() { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1465,7 +1463,7 @@ public Builder setResultEndTime(com.google.protobuf.Duration value) { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1485,7 +1483,7 @@ public Builder setResultEndTime(com.google.protobuf.Duration.Builder builderForV * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1512,7 +1510,7 @@ public Builder mergeResultEndTime(com.google.protobuf.Duration value) { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1533,7 +1531,7 @@ public Builder clearResultEndTime() { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1548,7 +1546,7 @@ public com.google.protobuf.Duration.Builder getResultEndTimeBuilder() { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1567,7 +1565,7 @@ public com.google.protobuf.DurationOrBuilder getResultEndTimeOrBuilder() { * * *
-     * Output only. Time offset of the end of this result relative to the
+     * Time offset of the end of this result relative to the
      * beginning of the audio.
      * 
* @@ -1645,10 +1643,9 @@ public Builder clearChannelTag() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+     * of the language in this result. This language code was detected to have
+     * the most likelihood of being spoken in the audio.
      * 
* * string language_code = 6; @@ -1668,10 +1665,9 @@ public java.lang.String getLanguageCode() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+     * of the language in this result. This language code was detected to have
+     * the most likelihood of being spoken in the audio.
      * 
* * string language_code = 6; @@ -1691,10 +1687,9 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+     * of the language in this result. This language code was detected to have
+     * the most likelihood of being spoken in the audio.
      * 
* * string language_code = 6; @@ -1712,10 +1707,9 @@ public Builder setLanguageCode(java.lang.String value) { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+     * of the language in this result. This language code was detected to have
+     * the most likelihood of being spoken in the audio.
      * 
* * string language_code = 6; @@ -1730,10 +1724,9 @@ public Builder clearLanguageCode() { * * *
-     * Output only. The
-     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-     * language in this result. This language code was detected to have the most
-     * likelihood of being spoken in the audio.
+     * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+     * of the language in this result. This language code was detected to have
+     * the most likelihood of being spoken in the audio.
      * 
* * string language_code = 6; diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionResultOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionResultOrBuilder.java index d12ca23b5..ee0712b96 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionResultOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognitionResultOrBuilder.java @@ -27,7 +27,7 @@ public interface StreamingRecognitionResultOrBuilder * * *
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -42,7 +42,7 @@ public interface StreamingRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -56,7 +56,7 @@ public interface StreamingRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -70,7 +70,7 @@ public interface StreamingRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -85,7 +85,7 @@ public interface StreamingRecognitionResultOrBuilder
    *
    *
    * 
-   * Output only. May contain one or more recognition hypotheses (up to the
+   * May contain one or more recognition hypotheses (up to the
    * maximum specified in `max_alternatives`).
    * These alternatives are ordered in terms of accuracy, with the top (first)
    * alternative being the most probable, as ranked by the recognizer.
@@ -101,7 +101,7 @@ com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternativeOrBuilder getAlter
    *
    *
    * 
-   * Output only. If `false`, this `StreamingRecognitionResult` represents an
+   * If `false`, this `StreamingRecognitionResult` represents an
    * interim result that may change. If `true`, this is the final time the
    * speech service will return this particular `StreamingRecognitionResult`,
    * the recognizer will not return any further hypotheses for this portion of
@@ -116,7 +116,7 @@ com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternativeOrBuilder getAlter
    *
    *
    * 
-   * Output only. An estimate of the likelihood that the recognizer will not
+   * An estimate of the likelihood that the recognizer will not
    * change its guess about this interim result. Values range from 0.0
    * (completely unstable) to 1.0 (completely stable).
    * This field is only provided for interim results (`is_final=false`).
@@ -131,7 +131,7 @@ com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternativeOrBuilder getAlter
    *
    *
    * 
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -142,7 +142,7 @@ com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternativeOrBuilder getAlter * * *
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -153,7 +153,7 @@ com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternativeOrBuilder getAlter * * *
-   * Output only. Time offset of the end of this result relative to the
+   * Time offset of the end of this result relative to the
    * beginning of the audio.
    * 
* @@ -178,10 +178,9 @@ com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternativeOrBuilder getAlter * * *
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+   * of the language in this result. This language code was detected to have
+   * the most likelihood of being spoken in the audio.
    * 
* * string language_code = 6; @@ -191,10 +190,9 @@ com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternativeOrBuilder getAlter * * *
-   * Output only. The
-   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-   * language in this result. This language code was detected to have the most
-   * likelihood of being spoken in the audio.
+   * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+   * of the language in this result. This language code was detected to have
+   * the most likelihood of being spoken in the audio.
    * 
* * string language_code = 6; diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeRequest.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeRequest.java index a4774f6ae..e94e196ef 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeRequest.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeRequest.java @@ -24,9 +24,9 @@ *
  * The top-level message sent by the client for the `StreamingRecognize` method.
  * Multiple `StreamingRecognizeRequest` messages are sent. The first message
- * must contain a `streaming_config` message and must not contain `audio` data.
- * All subsequent messages must contain `audio` data and must not contain a
- * `streaming_config` message.
+ * must contain a `streaming_config` message and must not contain
+ * `audio_content`. All subsequent messages must contain `audio_content` and
+ * must not contain a `streaming_config` message.
  * 
* * Protobuf type {@code google.cloud.speech.v1p1beta1.StreamingRecognizeRequest} @@ -231,9 +231,9 @@ public com.google.cloud.speech.v1p1beta1.StreamingRecognitionConfig getStreaming * `StreamingRecognizeRequest` message must not contain `audio_content` data * and all subsequent `StreamingRecognizeRequest` messages must contain * `audio_content` data. The audio bytes must be encoded as specified in - * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a * pure binary representation (not base64). See - * [content limits](/speech-to-text/quotas#content). + * [content limits](https://cloud.google.com/speech-to-text/quotas#content). *
* * bytes audio_content = 2; @@ -442,9 +442,9 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build *
    * The top-level message sent by the client for the `StreamingRecognize` method.
    * Multiple `StreamingRecognizeRequest` messages are sent. The first message
-   * must contain a `streaming_config` message and must not contain `audio` data.
-   * All subsequent messages must contain `audio` data and must not contain a
-   * `streaming_config` message.
+   * must contain a `streaming_config` message and must not contain
+   * `audio_content`. All subsequent messages must contain `audio_content` and
+   * must not contain a `streaming_config` message.
    * 
* * Protobuf type {@code google.cloud.speech.v1p1beta1.StreamingRecognizeRequest} @@ -872,9 +872,9 @@ public Builder clearStreamingConfig() { * `StreamingRecognizeRequest` message must not contain `audio_content` data * and all subsequent `StreamingRecognizeRequest` messages must contain * `audio_content` data. The audio bytes must be encoded as specified in - * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a * pure binary representation (not base64). See - * [content limits](/speech-to-text/quotas#content). + * [content limits](https://cloud.google.com/speech-to-text/quotas#content). *
* * bytes audio_content = 2; @@ -894,9 +894,9 @@ public com.google.protobuf.ByteString getAudioContent() { * `StreamingRecognizeRequest` message must not contain `audio_content` data * and all subsequent `StreamingRecognizeRequest` messages must contain * `audio_content` data. The audio bytes must be encoded as specified in - * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a * pure binary representation (not base64). See - * [content limits](/speech-to-text/quotas#content). + * [content limits](https://cloud.google.com/speech-to-text/quotas#content). *
* * bytes audio_content = 2; @@ -919,9 +919,9 @@ public Builder setAudioContent(com.google.protobuf.ByteString value) { * `StreamingRecognizeRequest` message must not contain `audio_content` data * and all subsequent `StreamingRecognizeRequest` messages must contain * `audio_content` data. The audio bytes must be encoded as specified in - * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a * pure binary representation (not base64). See - * [content limits](/speech-to-text/quotas#content). + * [content limits](https://cloud.google.com/speech-to-text/quotas#content). *
* * bytes audio_content = 2; diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeRequestOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeRequestOrBuilder.java index 2ba663996..09337172e 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeRequestOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeRequestOrBuilder.java @@ -70,9 +70,9 @@ public interface StreamingRecognizeRequestOrBuilder * `StreamingRecognizeRequest` message must not contain `audio_content` data * and all subsequent `StreamingRecognizeRequest` messages must contain * `audio_content` data. The audio bytes must be encoded as specified in - * `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a + * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a * pure binary representation (not base64). See - * [content limits](/speech-to-text/quotas#content). + * [content limits](https://cloud.google.com/speech-to-text/quotas#content). *
* * bytes audio_content = 2; diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponse.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponse.java index a10a75f30..096a7490c 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponse.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponse.java @@ -319,8 +319,8 @@ private SpeechEventType(int value) { * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -332,8 +332,8 @@ public boolean hasError() { * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -345,8 +345,8 @@ public com.google.rpc.Status getError() { * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -361,7 +361,7 @@ public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { * * *
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -377,7 +377,7 @@ public com.google.rpc.StatusOrBuilder getErrorOrBuilder() {
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -394,7 +394,7 @@ public com.google.rpc.StatusOrBuilder getErrorOrBuilder() {
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -409,7 +409,7 @@ public int getResultsCount() {
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -424,7 +424,7 @@ public com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult getResults(i
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -443,7 +443,7 @@ public com.google.cloud.speech.v1p1beta1.StreamingRecognitionResultOrBuilder get
    *
    *
    * 
-   * Output only. Indicates the type of speech event.
+   * Indicates the type of speech event.
    * 
* * @@ -457,7 +457,7 @@ public int getSpeechEventTypeValue() { * * *
-   * Output only. Indicates the type of speech event.
+   * Indicates the type of speech event.
    * 
* * @@ -932,8 +932,8 @@ public Builder mergeFrom( * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -945,8 +945,8 @@ public boolean hasError() { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -962,8 +962,8 @@ public com.google.rpc.Status getError() { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -985,8 +985,8 @@ public Builder setError(com.google.rpc.Status value) { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1005,8 +1005,8 @@ public Builder setError(com.google.rpc.Status.Builder builderForValue) { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1029,8 +1029,8 @@ public Builder mergeError(com.google.rpc.Status value) { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1050,8 +1050,8 @@ public Builder clearError() { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1065,8 +1065,8 @@ public com.google.rpc.Status.Builder getErrorBuilder() { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1082,8 +1082,8 @@ public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { * * *
-     * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-     * message that specifies the error for the operation.
+     * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+     * specifies the error for the operation.
      * 
* * .google.rpc.Status error = 1; @@ -1124,7 +1124,7 @@ private void ensureResultsIsMutable() { * * *
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1144,7 +1144,7 @@ private void ensureResultsIsMutable() {
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1163,7 +1163,7 @@ public int getResultsCount() {
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1182,7 +1182,7 @@ public com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult getResults(i
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1208,7 +1208,7 @@ public Builder setResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1232,7 +1232,7 @@ public Builder setResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1257,7 +1257,7 @@ public Builder addResults(com.google.cloud.speech.v1p1beta1.StreamingRecognition
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1283,7 +1283,7 @@ public Builder addResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1306,7 +1306,7 @@ public Builder addResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1330,7 +1330,7 @@ public Builder addResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1354,7 +1354,7 @@ public Builder addAllResults(
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1376,7 +1376,7 @@ public Builder clearResults() {
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1398,7 +1398,7 @@ public Builder removeResults(int index) {
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1414,7 +1414,7 @@ public com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult.Builder getR
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1434,7 +1434,7 @@ public com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult.Builder getR
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1455,7 +1455,7 @@ public com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult.Builder getR
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1473,7 +1473,7 @@ public com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult.Builder getR
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1492,7 +1492,7 @@ public com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult.Builder addR
      *
      *
      * 
-     * Output only. This repeated list contains zero or more results that
+     * This repeated list contains zero or more results that
      * correspond to consecutive portions of the audio currently being processed.
      * It contains zero or one `is_final=true` result (the newly settled portion),
      * followed by zero or more `is_final=false` results (the interim results).
@@ -1527,7 +1527,7 @@ public com.google.cloud.speech.v1p1beta1.StreamingRecognitionResult.Builder addR
      *
      *
      * 
-     * Output only. Indicates the type of speech event.
+     * Indicates the type of speech event.
      * 
* * @@ -1541,7 +1541,7 @@ public int getSpeechEventTypeValue() { * * *
-     * Output only. Indicates the type of speech event.
+     * Indicates the type of speech event.
      * 
* * @@ -1557,7 +1557,7 @@ public Builder setSpeechEventTypeValue(int value) { * * *
-     * Output only. Indicates the type of speech event.
+     * Indicates the type of speech event.
      * 
* * @@ -1579,7 +1579,7 @@ public Builder setSpeechEventTypeValue(int value) { * * *
-     * Output only. Indicates the type of speech event.
+     * Indicates the type of speech event.
      * 
* * @@ -1600,7 +1600,7 @@ public Builder setSpeechEventType( * * *
-     * Output only. Indicates the type of speech event.
+     * Indicates the type of speech event.
      * 
* * diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponseOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponseOrBuilder.java index ae0ae2ebf..e3092f361 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponseOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponseOrBuilder.java @@ -27,8 +27,8 @@ public interface StreamingRecognizeResponseOrBuilder * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -38,8 +38,8 @@ public interface StreamingRecognizeResponseOrBuilder * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -49,8 +49,8 @@ public interface StreamingRecognizeResponseOrBuilder * * *
-   * Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-   * message that specifies the error for the operation.
+   * If set, returns a [google.rpc.Status][google.rpc.Status] message that
+   * specifies the error for the operation.
    * 
* * .google.rpc.Status error = 1; @@ -61,7 +61,7 @@ public interface StreamingRecognizeResponseOrBuilder * * *
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -74,7 +74,7 @@ public interface StreamingRecognizeResponseOrBuilder
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -87,7 +87,7 @@ public interface StreamingRecognizeResponseOrBuilder
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -100,7 +100,7 @@ public interface StreamingRecognizeResponseOrBuilder
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -114,7 +114,7 @@ public interface StreamingRecognizeResponseOrBuilder
    *
    *
    * 
-   * Output only. This repeated list contains zero or more results that
+   * This repeated list contains zero or more results that
    * correspond to consecutive portions of the audio currently being processed.
    * It contains zero or one `is_final=true` result (the newly settled portion),
    * followed by zero or more `is_final=false` results (the interim results).
@@ -129,7 +129,7 @@ com.google.cloud.speech.v1p1beta1.StreamingRecognitionResultOrBuilder getResults
    *
    *
    * 
-   * Output only. Indicates the type of speech event.
+   * Indicates the type of speech event.
    * 
* * @@ -141,7 +141,7 @@ com.google.cloud.speech.v1p1beta1.StreamingRecognitionResultOrBuilder getResults * * *
-   * Output only. Indicates the type of speech event.
+   * Indicates the type of speech event.
    * 
* * diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/WordInfo.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/WordInfo.java index 934efcb02..2ca0d60e5 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/WordInfo.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/WordInfo.java @@ -152,7 +152,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -169,7 +169,7 @@ public boolean hasStartTime() {
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -186,7 +186,7 @@ public com.google.protobuf.Duration getStartTime() {
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -206,7 +206,7 @@ public com.google.protobuf.DurationOrBuilder getStartTimeOrBuilder() {
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -223,7 +223,7 @@ public boolean hasEndTime() {
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -240,7 +240,7 @@ public com.google.protobuf.Duration getEndTime() {
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -260,7 +260,7 @@ public com.google.protobuf.DurationOrBuilder getEndTimeOrBuilder() {
    *
    *
    * 
-   * Output only. The word corresponding to this set of information.
+   * The word corresponding to this set of information.
    * 
* * string word = 3; @@ -280,7 +280,7 @@ public java.lang.String getWord() { * * *
-   * Output only. The word corresponding to this set of information.
+   * The word corresponding to this set of information.
    * 
* * string word = 3; @@ -303,7 +303,7 @@ public com.google.protobuf.ByteString getWordBytes() { * * *
-   * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+   * The confidence estimate between 0.0 and 1.0. A higher number
    * indicates an estimated greater likelihood that the recognized words are
    * correct. This field is set only for the top alternative of a non-streaming
    * result or, of a streaming result where `is_final=true`.
@@ -324,7 +324,7 @@ public float getConfidence() {
    *
    *
    * 
-   * Output only. A distinct integer value is assigned for every speaker within
+   * A distinct integer value is assigned for every speaker within
    * the audio. This field specifies which one of those speakers was detected to
    * have spoken this word. Value ranges from '1' to diarization_speaker_count.
    * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
@@ -750,7 +750,7 @@ public Builder mergeFrom(
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -767,7 +767,7 @@ public boolean hasStartTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -788,7 +788,7 @@ public com.google.protobuf.Duration getStartTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -815,7 +815,7 @@ public Builder setStartTime(com.google.protobuf.Duration value) {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -839,7 +839,7 @@ public Builder setStartTime(com.google.protobuf.Duration.Builder builderForValue
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -868,7 +868,7 @@ public Builder mergeStartTime(com.google.protobuf.Duration value) {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -893,7 +893,7 @@ public Builder clearStartTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -912,7 +912,7 @@ public com.google.protobuf.Duration.Builder getStartTimeBuilder() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -933,7 +933,7 @@ public com.google.protobuf.DurationOrBuilder getStartTimeOrBuilder() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the start of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -970,7 +970,7 @@ public com.google.protobuf.DurationOrBuilder getStartTimeOrBuilder() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -987,7 +987,7 @@ public boolean hasEndTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1008,7 +1008,7 @@ public com.google.protobuf.Duration getEndTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1035,7 +1035,7 @@ public Builder setEndTime(com.google.protobuf.Duration value) {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1059,7 +1059,7 @@ public Builder setEndTime(com.google.protobuf.Duration.Builder builderForValue)
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1088,7 +1088,7 @@ public Builder mergeEndTime(com.google.protobuf.Duration value) {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1113,7 +1113,7 @@ public Builder clearEndTime() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1132,7 +1132,7 @@ public com.google.protobuf.Duration.Builder getEndTimeBuilder() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1153,7 +1153,7 @@ public com.google.protobuf.DurationOrBuilder getEndTimeOrBuilder() {
      *
      *
      * 
-     * Output only. Time offset relative to the beginning of the audio,
+     * Time offset relative to the beginning of the audio,
      * and corresponding to the end of the spoken word.
      * This field is only set if `enable_word_time_offsets=true` and only
      * in the top hypothesis.
@@ -1185,7 +1185,7 @@ public com.google.protobuf.DurationOrBuilder getEndTimeOrBuilder() {
      *
      *
      * 
-     * Output only. The word corresponding to this set of information.
+     * The word corresponding to this set of information.
      * 
* * string word = 3; @@ -1205,7 +1205,7 @@ public java.lang.String getWord() { * * *
-     * Output only. The word corresponding to this set of information.
+     * The word corresponding to this set of information.
      * 
* * string word = 3; @@ -1225,7 +1225,7 @@ public com.google.protobuf.ByteString getWordBytes() { * * *
-     * Output only. The word corresponding to this set of information.
+     * The word corresponding to this set of information.
      * 
* * string word = 3; @@ -1243,7 +1243,7 @@ public Builder setWord(java.lang.String value) { * * *
-     * Output only. The word corresponding to this set of information.
+     * The word corresponding to this set of information.
      * 
* * string word = 3; @@ -1258,7 +1258,7 @@ public Builder clearWord() { * * *
-     * Output only. The word corresponding to this set of information.
+     * The word corresponding to this set of information.
      * 
* * string word = 3; @@ -1279,7 +1279,7 @@ public Builder setWordBytes(com.google.protobuf.ByteString value) { * * *
-     * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+     * The confidence estimate between 0.0 and 1.0. A higher number
      * indicates an estimated greater likelihood that the recognized words are
      * correct. This field is set only for the top alternative of a non-streaming
      * result or, of a streaming result where `is_final=true`.
@@ -1297,7 +1297,7 @@ public float getConfidence() {
      *
      *
      * 
-     * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+     * The confidence estimate between 0.0 and 1.0. A higher number
      * indicates an estimated greater likelihood that the recognized words are
      * correct. This field is set only for the top alternative of a non-streaming
      * result or, of a streaming result where `is_final=true`.
@@ -1318,7 +1318,7 @@ public Builder setConfidence(float value) {
      *
      *
      * 
-     * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+     * The confidence estimate between 0.0 and 1.0. A higher number
      * indicates an estimated greater likelihood that the recognized words are
      * correct. This field is set only for the top alternative of a non-streaming
      * result or, of a streaming result where `is_final=true`.
@@ -1341,7 +1341,7 @@ public Builder clearConfidence() {
      *
      *
      * 
-     * Output only. A distinct integer value is assigned for every speaker within
+     * A distinct integer value is assigned for every speaker within
      * the audio. This field specifies which one of those speakers was detected to
      * have spoken this word. Value ranges from '1' to diarization_speaker_count.
      * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
@@ -1357,7 +1357,7 @@ public int getSpeakerTag() {
      *
      *
      * 
-     * Output only. A distinct integer value is assigned for every speaker within
+     * A distinct integer value is assigned for every speaker within
      * the audio. This field specifies which one of those speakers was detected to
      * have spoken this word. Value ranges from '1' to diarization_speaker_count.
      * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
@@ -1376,7 +1376,7 @@ public Builder setSpeakerTag(int value) {
      *
      *
      * 
-     * Output only. A distinct integer value is assigned for every speaker within
+     * A distinct integer value is assigned for every speaker within
      * the audio. This field specifies which one of those speakers was detected to
      * have spoken this word. Value ranges from '1' to diarization_speaker_count.
      * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/WordInfoOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/WordInfoOrBuilder.java
index 540ab80d0..8f37795f8 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/WordInfoOrBuilder.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/WordInfoOrBuilder.java
@@ -27,7 +27,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -42,7 +42,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -57,7 +57,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the start of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -73,7 +73,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -88,7 +88,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -103,7 +103,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. Time offset relative to the beginning of the audio,
+   * Time offset relative to the beginning of the audio,
    * and corresponding to the end of the spoken word.
    * This field is only set if `enable_word_time_offsets=true` and only
    * in the top hypothesis.
@@ -119,7 +119,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. The word corresponding to this set of information.
+   * The word corresponding to this set of information.
    * 
* * string word = 3; @@ -129,7 +129,7 @@ public interface WordInfoOrBuilder * * *
-   * Output only. The word corresponding to this set of information.
+   * The word corresponding to this set of information.
    * 
* * string word = 3; @@ -140,7 +140,7 @@ public interface WordInfoOrBuilder * * *
-   * Output only. The confidence estimate between 0.0 and 1.0. A higher number
+   * The confidence estimate between 0.0 and 1.0. A higher number
    * indicates an estimated greater likelihood that the recognized words are
    * correct. This field is set only for the top alternative of a non-streaming
    * result or, of a streaming result where `is_final=true`.
@@ -157,7 +157,7 @@ public interface WordInfoOrBuilder
    *
    *
    * 
-   * Output only. A distinct integer value is assigned for every speaker within
+   * A distinct integer value is assigned for every speaker within
    * the audio. This field specifies which one of those speakers was detected to
    * have spoken this word. Value ranges from '1' to diarization_speaker_count.
    * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto b/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto
index 4ca4479a8..7718b0eb3 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto
@@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC.
+// Copyright 2019 Google LLC.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -18,10 +18,11 @@ syntax = "proto3";
 package google.cloud.speech.v1p1beta1;
 
 import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
 import "google/longrunning/operations.proto";
 import "google/protobuf/any.proto";
 import "google/protobuf/duration.proto";
-import "google/protobuf/empty.proto";
 import "google/protobuf/timestamp.proto";
 import "google/rpc/status.proto";
 
@@ -30,9 +31,13 @@ option go_package = "google.golang.org/genproto/googleapis/cloud/speech/v1p1beta
 option java_multiple_files = true;
 option java_outer_classname = "SpeechProto";
 option java_package = "com.google.cloud.speech.v1p1beta1";
+option objc_class_prefix = "GCS";
 
 // Service that implements Google Cloud Speech API.
 service Speech {
+  option (google.api.default_host) = "speech.googleapis.com";
+  option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
+
   // Performs synchronous speech recognition: receive results after all audio
   // has been sent and processed.
   rpc Recognize(RecognizeRequest) returns (RecognizeResponse) {
@@ -40,52 +45,59 @@ service Speech {
       post: "/v1p1beta1/speech:recognize"
       body: "*"
     };
+    option (google.api.method_signature) = "config,audio";
   }
 
   // Performs asynchronous speech recognition: receive results via the
   // google.longrunning.Operations interface. Returns either an
   // `Operation.error` or an `Operation.response` which contains
   // a `LongRunningRecognizeResponse` message.
-  rpc LongRunningRecognize(LongRunningRecognizeRequest)
-      returns (google.longrunning.Operation) {
+  // For more information on asynchronous speech recognition, see the
+  // [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
+  rpc LongRunningRecognize(LongRunningRecognizeRequest) returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1p1beta1/speech:longrunningrecognize"
       body: "*"
     };
+    option (google.api.method_signature) = "config,audio";
+    option (google.longrunning.operation_info) = {
+      response_type: "LongRunningRecognizeResponse"
+      metadata_type: "LongRunningRecognizeMetadata"
+    };
   }
 
   // Performs bidirectional streaming speech recognition: receive results while
   // sending audio. This method is only available via the gRPC API (not REST).
-  rpc StreamingRecognize(stream StreamingRecognizeRequest)
-      returns (stream StreamingRecognizeResponse) {}
+  rpc StreamingRecognize(stream StreamingRecognizeRequest) returns (stream StreamingRecognizeResponse) {
+  }
 }
 
 // The top-level message sent by the client for the `Recognize` method.
 message RecognizeRequest {
-  // *Required* Provides information to the recognizer that specifies how to
+  // Required. Provides information to the recognizer that specifies how to
   // process the request.
-  RecognitionConfig config = 1;
+  RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // *Required* The audio data to be recognized.
-  RecognitionAudio audio = 2;
+  // Required. The audio data to be recognized.
+  RecognitionAudio audio = 2 [(google.api.field_behavior) = REQUIRED];
 }
 
 // The top-level message sent by the client for the `LongRunningRecognize`
 // method.
 message LongRunningRecognizeRequest {
-  // *Required* Provides information to the recognizer that specifies how to
+  // Required. Provides information to the recognizer that specifies how to
   // process the request.
-  RecognitionConfig config = 1;
+  RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // *Required* The audio data to be recognized.
-  RecognitionAudio audio = 2;
+  // Required. The audio data to be recognized.
+  RecognitionAudio audio = 2 [(google.api.field_behavior) = REQUIRED];
 }
 
 // The top-level message sent by the client for the `StreamingRecognize` method.
 // Multiple `StreamingRecognizeRequest` messages are sent. The first message
-// must contain a `streaming_config` message and must not contain `audio` data.
-// All subsequent messages must contain `audio` data and must not contain a
-// `streaming_config` message.
+// must contain a `streaming_config` message and must not contain
+// `audio_content`. All subsequent messages must contain `audio_content` and
+// must not contain a `streaming_config` message.
 message StreamingRecognizeRequest {
   // The streaming request, which is either a streaming config or audio content.
   oneof streaming_request {
@@ -99,9 +111,9 @@ message StreamingRecognizeRequest {
     // `StreamingRecognizeRequest` message must not contain `audio_content` data
     // and all subsequent `StreamingRecognizeRequest` messages must contain
     // `audio_content` data. The audio bytes must be encoded as specified in
-    // `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+    // `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
     // pure binary representation (not base64). See
-    // [content limits](/speech-to-text/quotas#content).
+    // [content limits](https://cloud.google.com/speech-to-text/quotas#content).
     bytes audio_content = 2;
   }
 }
@@ -109,11 +121,11 @@ message StreamingRecognizeRequest {
 // Provides information to the recognizer that specifies how to process the
 // request.
 message StreamingRecognitionConfig {
-  // *Required* Provides information to the recognizer that specifies how to
+  // Required. Provides information to the recognizer that specifies how to
   // process the request.
-  RecognitionConfig config = 1;
+  RecognitionConfig config = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // *Optional* If `false` or omitted, the recognizer will perform continuous
+  // If `false` or omitted, the recognizer will perform continuous
   // recognition (continuing to wait for and process audio even if the user
   // pauses speaking) until the client closes the input stream (gRPC API) or
   // until the maximum time limit has been reached. May return multiple
@@ -126,7 +138,7 @@ message StreamingRecognitionConfig {
   // `true`.
   bool single_utterance = 2;
 
-  // *Optional* If `true`, interim results (tentative hypotheses) may be
+  // If `true`, interim results (tentative hypotheses) may be
   // returned as they become available (these interim results are indicated with
   // the `is_final=false` flag).
   // If `false` or omitted, only `is_final=true` result(s) are returned.
@@ -138,13 +150,15 @@ message StreamingRecognitionConfig {
 message RecognitionConfig {
   // The encoding of the audio data sent in the request.
   //
-  // All encodings support only 1 channel (mono) audio.
+  // All encodings support only 1 channel (mono) audio, unless the
+  // `audio_channel_count` and `enable_separate_recognition_per_channel` fields
+  // are set.
   //
   // For best results, the audio source should be captured and transmitted using
   // a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
   // recognition can be reduced if lossy codecs are used to capture or transmit
   // audio, particularly if background noise is present. Lossy codecs include
-  // `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`.
+  // `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`.
   //
   // The `FLAC` and `WAV` audio file formats include a header that describes the
   // included audio content. You can request recognition for `WAV` files that
@@ -155,8 +169,7 @@ message RecognitionConfig {
   // an `AudioEncoding` when you send  send `FLAC` or `WAV` audio, the
   // encoding configuration must match the encoding described in the audio
   // header; otherwise the request returns an
-  // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
-  // code.
+  // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code.
   enum AudioEncoding {
     // Not specified.
     ENCODING_UNSPECIFIED = 0;
@@ -209,8 +222,7 @@ message RecognitionConfig {
 
   // Encoding of audio data sent in all `RecognitionAudio` messages.
   // This field is optional for `FLAC` and `WAV` audio files and required
-  // for all other audio formats. For details, see
-  // [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+  // for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
   AudioEncoding encoding = 1;
 
   // Sample rate in Hertz of the audio data sent in all
@@ -218,12 +230,11 @@ message RecognitionConfig {
   // 16000 is optimal. For best results, set the sampling rate of the audio
   // source to 16000 Hz. If that's not possible, use the native sample rate of
   // the audio source (instead of re-sampling).
-  // This field is optional for `FLAC` and `WAV` audio files and required
-  // for all other audio formats. For details, see
-  // [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
+  // This field is optional for FLAC and WAV audio files, but is
+  // required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
   int32 sample_rate_hertz = 2;
 
-  // *Optional* The number of channels in the input audio data.
+  // The number of channels in the input audio data.
   // ONLY set this for MULTI-CHANNEL recognition.
   // Valid values for LINEAR16 and FLAC are `1`-`8`.
   // Valid values for OGG_OPUS are '1'-'254'.
@@ -234,7 +245,7 @@ message RecognitionConfig {
   // `enable_separate_recognition_per_channel` to 'true'.
   int32 audio_channel_count = 7;
 
-  // This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1
+  // This needs to be set to `true` explicitly and `audio_channel_count` > 1
   // to get each channel recognized separately. The recognition result will
   // contain a `channel_tag` field to state which channel that result belongs
   // to. If this is not true, we will only recognize the first channel. The
@@ -242,28 +253,29 @@ message RecognitionConfig {
   // `audio_channel_count` multiplied by the length of the audio.
   bool enable_separate_recognition_per_channel = 12;
 
-  // *Required* The language of the supplied audio as a
+  // Required. The language of the supplied audio as a
   // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   // Example: "en-US".
-  // See [Language Support](/speech-to-text/docs/languages)
-  // for a list of the currently supported language codes.
-  string language_code = 3;
+  // See [Language
+  // Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+  // of the currently supported language codes.
+  string language_code = 3 [(google.api.field_behavior) = REQUIRED];
 
-  // *Optional* A list of up to 3 additional
+  // A list of up to 3 additional
   // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   // listing possible alternative languages of the supplied audio.
-  // See [Language Support](/speech-to-text/docs/languages)
-  // for a list of the currently supported language codes.
-  // If alternative languages are listed, recognition result will contain
-  // recognition in the most likely language detected including the main
-  // language_code. The recognition result will include the language tag
-  // of the language detected in the audio.
-  // Note: This feature is only supported for Voice Command and Voice Search
-  // use cases and performance may vary for other use cases (e.g., phone call
+  // See [Language
+  // Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+  // of the currently supported language codes. If alternative languages are
+  // listed, recognition result will contain recognition in the most likely
+  // language detected including the main language_code. The recognition result
+  // will include the language tag of the language detected in the audio. Note:
+  // This feature is only supported for Voice Command and Voice Search use cases
+  // and performance may vary for other use cases (e.g., phone call
   // transcription).
   repeated string alternative_language_codes = 18;
 
-  // *Optional* Maximum number of recognition hypotheses to be returned.
+  // Maximum number of recognition hypotheses to be returned.
   // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
   // within each `SpeechRecognitionResult`.
   // The server may return fewer than `max_alternatives`.
@@ -271,30 +283,31 @@ message RecognitionConfig {
   // one. If omitted, will return a maximum of one.
   int32 max_alternatives = 4;
 
-  // *Optional* If set to `true`, the server will attempt to filter out
+  // If set to `true`, the server will attempt to filter out
   // profanities, replacing all but the initial character in each filtered word
   // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
   // won't be filtered out.
   bool profanity_filter = 5;
 
-  // *Optional* array of
-  // [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
-  // provide context to assist the speech recognition. For more information, see
-  // [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+  // Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
+  // A means to provide context to assist the speech recognition. For more
+  // information, see
+  // [speech
+  // adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
   repeated SpeechContext speech_contexts = 6;
 
-  // *Optional* If `true`, the top result includes a list of words and
+  // If `true`, the top result includes a list of words and
   // the start and end time offsets (timestamps) for those words. If
   // `false`, no word-level time offset information is returned. The default is
   // `false`.
   bool enable_word_time_offsets = 8;
 
-  // *Optional* If `true`, the top result includes a list of words and the
+  // If `true`, the top result includes a list of words and the
   // confidence for those words. If `false`, no word-level confidence
   // information is returned. The default is `false`.
   bool enable_word_confidence = 15;
 
-  // *Optional* If 'true', adds punctuation to recognition result hypotheses.
+  // If 'true', adds punctuation to recognition result hypotheses.
   // This feature is only available in select languages. Setting this for
   // requests in other languages has no effect at all.
   // The default 'false' value does not add punctuation to result hypotheses.
@@ -303,19 +316,18 @@ message RecognitionConfig {
   // premium feature.
   bool enable_automatic_punctuation = 11;
 
-  // *Optional* If 'true', enables speaker detection for each recognized word in
+  // If 'true', enables speaker detection for each recognized word in
   // the top alternative of the recognition result using a speaker_tag provided
   // in the WordInfo.
   // Note: Use diarization_config instead.
   bool enable_speaker_diarization = 16 [deprecated = true];
 
-  // *Optional*
   // If set, specifies the estimated number of speakers in the conversation.
   // Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
   // Note: Use diarization_config instead.
   int32 diarization_speaker_count = 17 [deprecated = true];
 
-  // *Optional* Config to enable speaker diarization and set additional
+  // Config to enable speaker diarization and set additional
   // parameters to make diarization better suited for your application.
   // Note: When this is enabled, we send all the words from the beginning of the
   // audio for the top alternative in every consecutive STREAMING responses.
@@ -325,10 +337,10 @@ message RecognitionConfig {
   // in the top alternative of the FINAL SpeechRecognitionResult.
   SpeakerDiarizationConfig diarization_config = 19;
 
-  // *Optional* Metadata regarding this request.
+  // Metadata regarding this request.
   RecognitionMetadata metadata = 9;
 
-  // *Optional* Which model to select for the given request. Select the model
+  // Which model to select for the given request. Select the model
   // best suited to your domain to get best results. If a model is not
   // explicitly specified, then we auto-select a model based on the parameters
   // in the RecognitionConfig.
@@ -362,7 +374,7 @@ message RecognitionConfig {
   // 
   string model = 13;
 
-  // *Optional* Set to true to use an enhanced model for speech recognition.
+  // Set to true to use an enhanced model for speech recognition.
   // If `use_enhanced` is set to true and the `model` field is not set, then
   // an appropriate enhanced model is chosen if an enhanced model exists for
   // the audio.
@@ -373,23 +385,18 @@ message RecognitionConfig {
   bool use_enhanced = 14;
 }
 
-// *Optional* Config to enable speaker diarization.
+// Config to enable speaker diarization.
 message SpeakerDiarizationConfig {
-  // *Optional* If 'true', enables speaker detection for each recognized word in
+  // If 'true', enables speaker detection for each recognized word in
   // the top alternative of the recognition result using a speaker_tag provided
   // in the WordInfo.
   bool enable_speaker_diarization = 1;
 
-  // Note: Set min_speaker_count = max_speaker_count to fix the number of
-  // speakers to be detected in the audio.
-
-  // *Optional*
   // Minimum number of speakers in the conversation. This range gives you more
   // flexibility by allowing the system to automatically determine the correct
   // number of speakers. If not set, the default value is 2.
   int32 min_speaker_count = 2;
 
-  // *Optional*
   // Maximum number of speakers in the conversation. This range gives you more
   // flexibility by allowing the system to automatically determine the correct
   // number of speakers. If not set, the default value is 6.
@@ -520,7 +527,7 @@ message RecognitionMetadata {
 
   // Obfuscated (privacy-protected) ID of the user, to identify number of
   // unique users using the service.
-  int64 obfuscated_id = 9;
+  int64 obfuscated_id = 9 [deprecated = true];
 
   // Description of the content. Eg. "Recordings of federal supreme court
   // hearings from 2012".
@@ -530,12 +537,12 @@ message RecognitionMetadata {
 // Provides "hints" to the speech recognizer to favor specific words and phrases
 // in the results.
 message SpeechContext {
-  // *Optional* A list of strings containing words and phrases "hints" so that
+  // A list of strings containing words and phrases "hints" so that
   // the speech recognition is more likely to recognize them. This can be used
   // to improve the accuracy for specific words and phrases, for example, if
   // specific commands are typically spoken by the user. This can also be used
   // to add additional words to the vocabulary of the recognizer. See
-  // [usage limits](/speech-to-text/quotas#content).
+  // [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
   //
   // List items can also be set to classes for groups of words that represent
   // common concepts that occur in natural language. For example, rather than
@@ -557,14 +564,14 @@ message SpeechContext {
 
 // Contains audio data in the encoding specified in the `RecognitionConfig`.
 // Either `content` or `uri` must be supplied. Supplying both or neither
-// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
-// See [content limits](/speech-to-text/quotas#content).
+// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
+// [content limits](https://cloud.google.com/speech-to-text/quotas#content).
 message RecognitionAudio {
   // The audio source, which is either inline content or a Google Cloud
   // Storage uri.
   oneof audio_source {
     // The audio data bytes encoded as specified in
-    // `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+    // `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
     // pure binary representation, whereas JSON representations use base64.
     bytes content = 1;
 
@@ -573,9 +580,8 @@ message RecognitionAudio {
     // Currently, only Google Cloud Storage URIs are
     // supported, which must be specified in the following format:
     // `gs://bucket_name/object_name` (other URI formats return
-    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
-    // For more information, see [Request
-    // URIs](https://cloud.google.com/storage/docs/reference-uris).
+    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+    // [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
     string uri = 2;
   }
 }
@@ -584,7 +590,7 @@ message RecognitionAudio {
 // contains the result as zero or more sequential `SpeechRecognitionResult`
 // messages.
 message RecognizeResponse {
-  // Output only. Sequential list of transcription results corresponding to
+  // Sequential list of transcription results corresponding to
   // sequential portions of audio.
   repeated SpeechRecognitionResult results = 2;
 }
@@ -595,7 +601,7 @@ message RecognizeResponse {
 // returned by the `GetOperation` call of the `google::longrunning::Operations`
 // service.
 message LongRunningRecognizeResponse {
-  // Output only. Sequential list of transcription results corresponding to
+  // Sequential list of transcription results corresponding to
   // sequential portions of audio.
   repeated SpeechRecognitionResult results = 2;
 }
@@ -680,44 +686,44 @@ message StreamingRecognizeResponse {
     END_OF_SINGLE_UTTERANCE = 1;
   }
 
-  // Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
-  // message that specifies the error for the operation.
+  // If set, returns a [google.rpc.Status][google.rpc.Status] message that
+  // specifies the error for the operation.
   google.rpc.Status error = 1;
 
-  // Output only. This repeated list contains zero or more results that
+  // This repeated list contains zero or more results that
   // correspond to consecutive portions of the audio currently being processed.
   // It contains zero or one `is_final=true` result (the newly settled portion),
   // followed by zero or more `is_final=false` results (the interim results).
   repeated StreamingRecognitionResult results = 2;
 
-  // Output only. Indicates the type of speech event.
+  // Indicates the type of speech event.
   SpeechEventType speech_event_type = 4;
 }
 
 // A streaming speech recognition result corresponding to a portion of the audio
 // that is currently being processed.
 message StreamingRecognitionResult {
-  // Output only. May contain one or more recognition hypotheses (up to the
+  // May contain one or more recognition hypotheses (up to the
   // maximum specified in `max_alternatives`).
   // These alternatives are ordered in terms of accuracy, with the top (first)
   // alternative being the most probable, as ranked by the recognizer.
   repeated SpeechRecognitionAlternative alternatives = 1;
 
-  // Output only. If `false`, this `StreamingRecognitionResult` represents an
+  // If `false`, this `StreamingRecognitionResult` represents an
   // interim result that may change. If `true`, this is the final time the
   // speech service will return this particular `StreamingRecognitionResult`,
   // the recognizer will not return any further hypotheses for this portion of
   // the transcript and corresponding audio.
   bool is_final = 2;
 
-  // Output only. An estimate of the likelihood that the recognizer will not
+  // An estimate of the likelihood that the recognizer will not
   // change its guess about this interim result. Values range from 0.0
   // (completely unstable) to 1.0 (completely stable).
   // This field is only provided for interim results (`is_final=false`).
   // The default of 0.0 is a sentinel value indicating `stability` was not set.
   float stability = 3;
 
-  // Output only. Time offset of the end of this result relative to the
+  // Time offset of the end of this result relative to the
   // beginning of the audio.
   google.protobuf.Duration result_end_time = 4;
 
@@ -726,16 +732,15 @@ message StreamingRecognitionResult {
   // For audio_channel_count = N, its output values can range from '1' to 'N'.
   int32 channel_tag = 5;
 
-  // Output only. The
-  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-  // language in this result. This language code was detected to have the most
-  // likelihood of being spoken in the audio.
+  // The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+  // of the language in this result. This language code was detected to have
+  // the highest likelihood of being spoken in the audio.
   string language_code = 6;
 }
 
 // A speech recognition result corresponding to a portion of the audio.
 message SpeechRecognitionResult {
-  // Output only. May contain one or more recognition hypotheses (up to the
+  // May contain one or more recognition hypotheses (up to the
   // maximum specified in `max_alternatives`).
   // These alternatives are ordered in terms of accuracy, with the top (first)
   // alternative being the most probable, as ranked by the recognizer.
@@ -746,19 +751,18 @@ message SpeechRecognitionResult {
   // For audio_channel_count = N, its output values can range from '1' to 'N'.
   int32 channel_tag = 2;
 
-  // Output only. The
-  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
-  // language in this result. This language code was detected to have the most
-  // likelihood of being spoken in the audio.
+  // The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
+  // of the language in this result. This language code was detected to have
+  // the highest likelihood of being spoken in the audio.
   string language_code = 5;
 }
 
 // Alternative hypotheses (a.k.a. n-best list).
 message SpeechRecognitionAlternative {
-  // Output only. Transcript text representing the words that the user spoke.
+  // Transcript text representing the words that the user spoke.
   string transcript = 1;
 
-  // Output only. The confidence estimate between 0.0 and 1.0. A higher number
+  // The confidence estimate between 0.0 and 1.0. A higher number
   // indicates an estimated greater likelihood that the recognized words are
   // correct. This field is set only for the top alternative of a non-streaming
   // result or, of a streaming result where `is_final=true`.
@@ -767,7 +771,7 @@ message SpeechRecognitionAlternative {
   // The default of 0.0 is a sentinel value indicating `confidence` was not set.
   float confidence = 2;
 
-  // Output only. A list of word-specific information for each recognized word.
+  // A list of word-specific information for each recognized word.
   // Note: When `enable_speaker_diarization` is true, you will see all the words
   // from the beginning of the audio.
   repeated WordInfo words = 3;
@@ -775,7 +779,7 @@ message SpeechRecognitionAlternative {
 
 // Word-specific information for recognized words.
 message WordInfo {
-  // Output only. Time offset relative to the beginning of the audio,
+  // Time offset relative to the beginning of the audio,
   // and corresponding to the start of the spoken word.
   // This field is only set if `enable_word_time_offsets=true` and only
   // in the top hypothesis.
@@ -783,7 +787,7 @@ message WordInfo {
   // vary.
   google.protobuf.Duration start_time = 1;
 
-  // Output only. Time offset relative to the beginning of the audio,
+  // Time offset relative to the beginning of the audio,
   // and corresponding to the end of the spoken word.
   // This field is only set if `enable_word_time_offsets=true` and only
   // in the top hypothesis.
@@ -791,10 +795,10 @@ message WordInfo {
   // vary.
   google.protobuf.Duration end_time = 2;
 
-  // Output only. The word corresponding to this set of information.
+  // The word corresponding to this set of information.
   string word = 3;
 
-  // Output only. The confidence estimate between 0.0 and 1.0. A higher number
+  // The confidence estimate between 0.0 and 1.0. A higher number
   // indicates an estimated greater likelihood that the recognized words are
   // correct. This field is set only for the top alternative of a non-streaming
   // result or, of a streaming result where `is_final=true`.
@@ -803,7 +807,7 @@ message WordInfo {
   // The default of 0.0 is a sentinel value indicating `confidence` was not set.
   float confidence = 4;
 
-  // Output only. A distinct integer value is assigned for every speaker within
+  // A distinct integer value is assigned for every speaker within
   // the audio. This field specifies which one of those speakers was detected to
   // have spoken this word. Value ranges from '1' to diarization_speaker_count.
   // speaker_tag is set if enable_speaker_diarization = 'true' and only in the
diff --git a/renovate.json b/renovate.json
index dae8a245c..998104700 100644
--- a/renovate.json
+++ b/renovate.json
@@ -5,14 +5,9 @@
   "ignoreDeps": [],
   "packageRules": [
     {
-      "packagePatterns": ["*"],
-      "semanticCommitType": "chore"
-    },
-    {
-      "depTypeList": [
-        "dependencies"
-      ],
-      "semanticCommitType": "deps"
+      "managers": ["maven"],
+      "packageNames": ["com.google.guava:guava*"],
+      "versionScheme": "docker"
     },
     {
       "packagePatterns": ["^io.grpc:grpc-"],
@@ -27,5 +22,6 @@
       "groupName": "OpenCensus packages"
     }
   ],
-  "semanticCommits": true
+  "semanticCommits": true,
+  "semanticCommitType": "deps"
 }
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeAsync.java b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeAsync.java
new file mode 100644
index 000000000..fde397cb6
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeAsync.java
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("LongRunningRequestAsync",  "speech_transcribe_async")
+// sample-metadata:
+//   title: Transcribe Audio File using Long Running Operation (Local File) (LRO)
+//   description: Transcribe a long audio file using asynchronous speech recognition
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1.SpeechTranscribeAsync [--args='[--local_file_path "resources/brooklyn_bridge.raw"]']
+
+package com.google.cloud.examples.speech.v1;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.speech.v1.LongRunningRecognizeMetadata;
+import com.google.cloud.speech.v1.LongRunningRecognizeRequest;
+import com.google.cloud.speech.v1.LongRunningRecognizeResponse;
+import com.google.cloud.speech.v1.RecognitionAudio;
+import com.google.cloud.speech.v1.RecognitionConfig;
+import com.google.cloud.speech.v1.SpeechClient;
+import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1.SpeechRecognitionResult;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeAsync {
+  // [START speech_transcribe_async]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.api.gax.longrunning.OperationFuture;
+   * import com.google.cloud.speech.v1.LongRunningRecognizeMetadata;
+   * import com.google.cloud.speech.v1.LongRunningRecognizeRequest;
+   * import com.google.cloud.speech.v1.LongRunningRecognizeResponse;
+   * import com.google.cloud.speech.v1.RecognitionAudio;
+   * import com.google.cloud.speech.v1.RecognitionConfig;
+   * import com.google.cloud.speech.v1.SpeechClient;
+   * import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1.SpeechRecognitionResult;
+   * import com.google.protobuf.ByteString;
+   * import java.nio.file.Files;
+   * import java.nio.file.Path;
+   * import java.nio.file.Paths;
+   */
+
+  /**
+   * Transcribe a long audio file using asynchronous speech recognition
+   *
+   * @param localFilePath Path to local audio file, e.g. /path/audio.wav
+   */
+  public static void sampleLongRunningRecognize(String localFilePath) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // localFilePath = "resources/brooklyn_bridge.raw";
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+
+      // Sample rate in Hertz of the audio data sent
+      int sampleRateHertz = 16000;
+
+      // Encoding of audio data sent. This sample sets this explicitly.
+      // This field is optional for FLAC and WAV audio formats.
+      RecognitionConfig.AudioEncoding encoding = RecognitionConfig.AudioEncoding.LINEAR16;
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setLanguageCode(languageCode)
+              .setSampleRateHertz(sampleRateHertz)
+              .setEncoding(encoding)
+              .build();
+      Path path = Paths.get(localFilePath);
+      byte[] data = Files.readAllBytes(path);
+      ByteString content = ByteString.copyFrom(data);
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
+      LongRunningRecognizeRequest request =
+          LongRunningRecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
+          speechClient.longRunningRecognizeAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      LongRunningRecognizeResponse response = future.get();
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to call the Speech API due to: " + exception);
+    }
+  }
+  // [END speech_transcribe_async]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("local_file_path").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String localFilePath = cl.getOptionValue("local_file_path", "resources/brooklyn_bridge.raw");
+
+    sampleLongRunningRecognize(localFilePath);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeAsyncGcs.java b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeAsyncGcs.java
new file mode 100644
index 000000000..ab96b670f
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeAsyncGcs.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("LongRunningRequestAsync",  "speech_transcribe_async_gcs")
+// sample-metadata:
+//   title: Transcribe Audio File using Long Running Operation (Cloud Storage) (LRO)
+//   description: Transcribe long audio file from Cloud Storage using asynchronous speech recognition
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1.SpeechTranscribeAsyncGcs [--args='[--storage_uri "gs://cloud-samples-data/speech/brooklyn_bridge.raw"]']
+
+package com.google.cloud.examples.speech.v1;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.speech.v1.LongRunningRecognizeMetadata;
+import com.google.cloud.speech.v1.LongRunningRecognizeRequest;
+import com.google.cloud.speech.v1.LongRunningRecognizeResponse;
+import com.google.cloud.speech.v1.RecognitionAudio;
+import com.google.cloud.speech.v1.RecognitionConfig;
+import com.google.cloud.speech.v1.SpeechClient;
+import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1.SpeechRecognitionResult;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeAsyncGcs {
+  // [START speech_transcribe_async_gcs]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.api.gax.longrunning.OperationFuture;
+   * import com.google.cloud.speech.v1.LongRunningRecognizeMetadata;
+   * import com.google.cloud.speech.v1.LongRunningRecognizeRequest;
+   * import com.google.cloud.speech.v1.LongRunningRecognizeResponse;
+   * import com.google.cloud.speech.v1.RecognitionAudio;
+   * import com.google.cloud.speech.v1.RecognitionConfig;
+   * import com.google.cloud.speech.v1.SpeechClient;
+   * import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1.SpeechRecognitionResult;
+   */
+
+  /**
+   * Transcribe long audio file from Cloud Storage using asynchronous speech recognition
+   *
+   * @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
+   */
+  public static void sampleLongRunningRecognize(String storageUri) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.raw";
+
+      // Sample rate in Hertz of the audio data sent
+      int sampleRateHertz = 16000;
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+
+      // Encoding of audio data sent. This sample sets this explicitly.
+      // This field is optional for FLAC and WAV audio formats.
+      RecognitionConfig.AudioEncoding encoding = RecognitionConfig.AudioEncoding.LINEAR16;
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setSampleRateHertz(sampleRateHertz)
+              .setLanguageCode(languageCode)
+              .setEncoding(encoding)
+              .build();
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(storageUri).build();
+      LongRunningRecognizeRequest request =
+          LongRunningRecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
+          speechClient.longRunningRecognizeAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      LongRunningRecognizeResponse response = future.get();
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to call the Speech API due to: " + exception);
+    }
+  }
+  // [END speech_transcribe_async_gcs]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("storage_uri").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String storageUri =
+        cl.getOptionValue("storage_uri", "gs://cloud-samples-data/speech/brooklyn_bridge.raw");
+
+    sampleLongRunningRecognize(storageUri);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeAsyncWordTimeOffsetsGcs.java b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeAsyncWordTimeOffsetsGcs.java
new file mode 100644
index 000000000..674277938
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeAsyncWordTimeOffsetsGcs.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("LongRunningRequestAsync",  "speech_transcribe_async_word_time_offsets_gcs")
+// sample-metadata:
+//   title: Getting word timestamps (Cloud Storage) (LRO)
+//   description: Print start and end time of each word spoken in audio file from Cloud Storage
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1.SpeechTranscribeAsyncWordTimeOffsetsGcs [--args='[--storage_uri "gs://cloud-samples-data/speech/brooklyn_bridge.flac"]']
+
+package com.google.cloud.examples.speech.v1;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.speech.v1.LongRunningRecognizeMetadata;
+import com.google.cloud.speech.v1.LongRunningRecognizeRequest;
+import com.google.cloud.speech.v1.LongRunningRecognizeResponse;
+import com.google.cloud.speech.v1.RecognitionAudio;
+import com.google.cloud.speech.v1.RecognitionConfig;
+import com.google.cloud.speech.v1.SpeechClient;
+import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1.SpeechRecognitionResult;
+import com.google.cloud.speech.v1.WordInfo;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeAsyncWordTimeOffsetsGcs {
+  // [START speech_transcribe_async_word_time_offsets_gcs]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.api.gax.longrunning.OperationFuture;
+   * import com.google.cloud.speech.v1.LongRunningRecognizeMetadata;
+   * import com.google.cloud.speech.v1.LongRunningRecognizeRequest;
+   * import com.google.cloud.speech.v1.LongRunningRecognizeResponse;
+   * import com.google.cloud.speech.v1.RecognitionAudio;
+   * import com.google.cloud.speech.v1.RecognitionConfig;
+   * import com.google.cloud.speech.v1.SpeechClient;
+   * import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1.SpeechRecognitionResult;
+   * import com.google.cloud.speech.v1.WordInfo;
+   */
+
+  /**
+   * Print start and end time of each word spoken in audio file from Cloud Storage
+   *
+   * @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
+   */
+  public static void sampleLongRunningRecognize(String storageUri) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.flac";
+
+      // When enabled, the first result returned by the API will include a list
+      // of words and the start and end time offsets (timestamps) for those words.
+      boolean enableWordTimeOffsets = true;
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setEnableWordTimeOffsets(enableWordTimeOffsets)
+              .setLanguageCode(languageCode)
+              .build();
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(storageUri).build();
+      LongRunningRecognizeRequest request =
+          LongRunningRecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
+          speechClient.longRunningRecognizeAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      LongRunningRecognizeResponse response = future.get();
+      // The first result includes start and end time word offsets
+      SpeechRecognitionResult result = response.getResultsList().get(0);
+      // First alternative is the most probable result
+      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+      System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      // Print the start and end time of each word
+      for (WordInfo word : alternative.getWordsList()) {
+        System.out.printf("Word: %s\n", word.getWord());
+        System.out.printf(
+            "Start time: %s seconds %s nanos\n",
+            word.getStartTime().getSeconds(), word.getStartTime().getNanos());
+        System.out.printf(
+            "End time: %s seconds %s nanos\n",
+            word.getEndTime().getSeconds(), word.getEndTime().getNanos());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to call the Speech API due to: " + exception);
+    }
+  }
+  // [END speech_transcribe_async_word_time_offsets_gcs]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("storage_uri").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String storageUri =
+        cl.getOptionValue("storage_uri", "gs://cloud-samples-data/speech/brooklyn_bridge.flac");
+
+    sampleLongRunningRecognize(storageUri);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeEnhancedModel.java b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeEnhancedModel.java
new file mode 100644
index 000000000..8fa8eabb1
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeEnhancedModel.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_enhanced_model")
+// sample-metadata:
+//   title: Using Enhanced Models (Local File)
+//   description: Transcribe a short audio file using an enhanced model
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1.SpeechTranscribeEnhancedModel [--args='[--local_file_path "resources/hello.wav"]']
+
+package com.google.cloud.examples.speech.v1;
+
+import com.google.cloud.speech.v1.RecognitionAudio;
+import com.google.cloud.speech.v1.RecognitionConfig;
+import com.google.cloud.speech.v1.RecognizeRequest;
+import com.google.cloud.speech.v1.RecognizeResponse;
+import com.google.cloud.speech.v1.SpeechClient;
+import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1.SpeechRecognitionResult;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeEnhancedModel {
+  // [START speech_transcribe_enhanced_model]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1.RecognitionAudio;
+   * import com.google.cloud.speech.v1.RecognitionConfig;
+   * import com.google.cloud.speech.v1.RecognizeRequest;
+   * import com.google.cloud.speech.v1.RecognizeResponse;
+   * import com.google.cloud.speech.v1.SpeechClient;
+   * import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1.SpeechRecognitionResult;
+   * import com.google.protobuf.ByteString;
+   * import java.nio.file.Files;
+   * import java.nio.file.Path;
+   * import java.nio.file.Paths;
+   */
+
+  /**
+   * Transcribe a short audio file using an enhanced model
+   *
+   * @param localFilePath Path to local audio file, e.g. /path/audio.wav
+   */
+  public static void sampleRecognize(String localFilePath) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // localFilePath = "resources/hello.wav";
+
+      // The enhanced model to use, e.g. phone_call
+      // Currently phone_call is the only model available as an enhanced model.
+      String model = "phone_call";
+
+      // Use an enhanced model for speech recognition (when set to true).
+      // Project must be eligible for requesting enhanced models.
+      // Enhanced speech models require that you opt-in to data logging.
+      boolean useEnhanced = true;
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setModel(model)
+              .setUseEnhanced(useEnhanced)
+              .setLanguageCode(languageCode)
+              .build();
+      Path path = Paths.get(localFilePath);
+      byte[] data = Files.readAllBytes(path);
+      ByteString content = ByteString.copyFrom(data);
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to call the Speech API due to: " + exception);
+    }
+  }
+  // [END speech_transcribe_enhanced_model]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("local_file_path").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String localFilePath = cl.getOptionValue("local_file_path", "resources/hello.wav");
+
+    sampleRecognize(localFilePath);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeModelSelection.java b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeModelSelection.java
new file mode 100644
index 000000000..5994ae7c9
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeModelSelection.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_model_selection")
+// sample-metadata:
+//   title: Selecting a Transcription Model (Local File)
+//   description: Transcribe a short audio file using a specified transcription model
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1.SpeechTranscribeModelSelection [--args='[--local_file_path "resources/hello.wav"] [--model "phone_call"]']
+
+package com.google.cloud.examples.speech.v1;
+
+import com.google.cloud.speech.v1.RecognitionAudio;
+import com.google.cloud.speech.v1.RecognitionConfig;
+import com.google.cloud.speech.v1.RecognizeRequest;
+import com.google.cloud.speech.v1.RecognizeResponse;
+import com.google.cloud.speech.v1.SpeechClient;
+import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1.SpeechRecognitionResult;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeModelSelection {
+  // [START speech_transcribe_model_selection]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1.RecognitionAudio;
+   * import com.google.cloud.speech.v1.RecognitionConfig;
+   * import com.google.cloud.speech.v1.RecognizeRequest;
+   * import com.google.cloud.speech.v1.RecognizeResponse;
+   * import com.google.cloud.speech.v1.SpeechClient;
+   * import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1.SpeechRecognitionResult;
+   * import com.google.protobuf.ByteString;
+   * import java.nio.file.Files;
+   * import java.nio.file.Path;
+   * import java.nio.file.Paths;
+   */
+
+  /**
+   * Transcribe a short audio file using a specified transcription model
+   *
+   * @param localFilePath Path to local audio file, e.g. /path/audio.wav
+   * @param model The transcription model to use, e.g. video, phone_call, default For a list of
+   *     available transcription models, see:
+   *     https://cloud.google.com/speech-to-text/docs/transcription-model#transcription_models
+   */
+  public static void sampleRecognize(String localFilePath, String model) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // localFilePath = "resources/hello.wav";
+      // model = "phone_call";
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder().setModel(model).setLanguageCode(languageCode).build();
+      Path path = Paths.get(localFilePath);
+      byte[] data = Files.readAllBytes(path);
+      ByteString content = ByteString.copyFrom(data);
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to call the Speech API due to: " + exception);
+    }
+  }
+  // [END speech_transcribe_model_selection]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("local_file_path").build());
+    options.addOption(Option.builder("").required(false).hasArg(true).longOpt("model").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String localFilePath = cl.getOptionValue("local_file_path", "resources/hello.wav");
+    String model = cl.getOptionValue("model", "phone_call");
+
+    sampleRecognize(localFilePath, model);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeModelSelectionGcs.java b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeModelSelectionGcs.java
new file mode 100644
index 000000000..99d17c944
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeModelSelectionGcs.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_model_selection_gcs")
+// sample-metadata:
+//   title: Selecting a Transcription Model (Cloud Storage)
+//   description: Transcribe a short audio file from Cloud Storage using a specified transcription model
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1.SpeechTranscribeModelSelectionGcs [--args='[--storage_uri "gs://cloud-samples-data/speech/hello.wav"] [--model "phone_call"]']
+
+package com.google.cloud.examples.speech.v1;
+
+import com.google.cloud.speech.v1.RecognitionAudio;
+import com.google.cloud.speech.v1.RecognitionConfig;
+import com.google.cloud.speech.v1.RecognizeRequest;
+import com.google.cloud.speech.v1.RecognizeResponse;
+import com.google.cloud.speech.v1.SpeechClient;
+import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1.SpeechRecognitionResult;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeModelSelectionGcs {
+  // [START speech_transcribe_model_selection_gcs]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1.RecognitionAudio;
+   * import com.google.cloud.speech.v1.RecognitionConfig;
+   * import com.google.cloud.speech.v1.RecognizeRequest;
+   * import com.google.cloud.speech.v1.RecognizeResponse;
+   * import com.google.cloud.speech.v1.SpeechClient;
+   * import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1.SpeechRecognitionResult;
+   */
+
+  /**
+   * Transcribe a short audio file from Cloud Storage using a specified transcription model
+   *
+   * @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
+   * @param model The transcription model to use, e.g. video, phone_call, default For a list of
+   *     available transcription models, see:
+   *     https://cloud.google.com/speech-to-text/docs/transcription-model#transcription_models
+   */
+  public static void sampleRecognize(String storageUri, String model) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // storageUri = "gs://cloud-samples-data/speech/hello.wav";
+      // model = "phone_call";
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder().setModel(model).setLanguageCode(languageCode).build();
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(storageUri).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s%n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Speech recognition request failed: " + exception);
+    }
+  }
+  // [END speech_transcribe_model_selection_gcs]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("storage_uri").build());
+    options.addOption(Option.builder("").required(false).hasArg(true).longOpt("model").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String storageUri =
+        cl.getOptionValue("storage_uri", "gs://cloud-samples-data/speech/hello.wav");
+    String model = cl.getOptionValue("model", "phone_call");
+
+    sampleRecognize(storageUri, model);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeMultichannel.java b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeMultichannel.java
new file mode 100644
index 000000000..c03e12d47
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeMultichannel.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_multichannel")
+// sample-metadata:
+//   title: Multi-Channel Audio Transcription (Local File)
+//   description: Transcribe a short audio file with multiple channels
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1.SpeechTranscribeMultichannel [--args='[--local_file_path "resources/multi.wav"]']
+
+package com.google.cloud.examples.speech.v1;
+
+import com.google.cloud.speech.v1.RecognitionAudio;
+import com.google.cloud.speech.v1.RecognitionConfig;
+import com.google.cloud.speech.v1.RecognizeRequest;
+import com.google.cloud.speech.v1.RecognizeResponse;
+import com.google.cloud.speech.v1.SpeechClient;
+import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1.SpeechRecognitionResult;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeMultichannel {
+  // [START speech_transcribe_multichannel]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1.RecognitionAudio;
+   * import com.google.cloud.speech.v1.RecognitionConfig;
+   * import com.google.cloud.speech.v1.RecognizeRequest;
+   * import com.google.cloud.speech.v1.RecognizeResponse;
+   * import com.google.cloud.speech.v1.SpeechClient;
+   * import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1.SpeechRecognitionResult;
+   * import com.google.protobuf.ByteString;
+   * import java.nio.file.Files;
+   * import java.nio.file.Path;
+   * import java.nio.file.Paths;
+   */
+
+  /**
+   * Transcribe a short audio file with multiple channels
+   *
+   * @param localFilePath Path to local audio file, e.g. /path/audio.wav
+   */
+  public static void sampleRecognize(String localFilePath) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // localFilePath = "resources/multi.wav";
+
+      // The number of channels in the input audio file (optional)
+      int audioChannelCount = 2;
+
+      // When set to true, each audio channel will be recognized separately.
+      // The recognition result will contain a channel_tag field to state which
+      // channel that result belongs to
+      boolean enableSeparateRecognitionPerChannel = true;
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setAudioChannelCount(audioChannelCount)
+              .setEnableSeparateRecognitionPerChannel(enableSeparateRecognitionPerChannel)
+              .setLanguageCode(languageCode)
+              .build();
+      Path path = Paths.get(localFilePath);
+      byte[] data = Files.readAllBytes(path);
+      ByteString content = ByteString.copyFrom(data);
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // channelTag to recognize which audio channel this result is for
+        System.out.printf("Channel tag: %s%n", result.getChannelTag());
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s%n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Speech recognition request failed: " + exception);
+    }
+  }
+  // [END speech_transcribe_multichannel]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("local_file_path").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String localFilePath = cl.getOptionValue("local_file_path", "resources/multi.wav");
+
+    sampleRecognize(localFilePath);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeMultichannelGcs.java b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeMultichannelGcs.java
new file mode 100644
index 000000000..a3fca3497
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeMultichannelGcs.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_multichannel_gcs")
+// sample-metadata:
+//   title: Multi-Channel Audio Transcription (Cloud Storage)
+//   description: Transcribe a short audio file from Cloud Storage with multiple channels
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1.SpeechTranscribeMultichannelGcs [--args='[--storage_uri "gs://cloud-samples-data/speech/multi.wav"]']
+
+package com.google.cloud.examples.speech.v1;
+
+import com.google.cloud.speech.v1.RecognitionAudio;
+import com.google.cloud.speech.v1.RecognitionConfig;
+import com.google.cloud.speech.v1.RecognizeRequest;
+import com.google.cloud.speech.v1.RecognizeResponse;
+import com.google.cloud.speech.v1.SpeechClient;
+import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1.SpeechRecognitionResult;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeMultichannelGcs {
+  // [START speech_transcribe_multichannel_gcs]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1.RecognitionAudio;
+   * import com.google.cloud.speech.v1.RecognitionConfig;
+   * import com.google.cloud.speech.v1.RecognizeRequest;
+   * import com.google.cloud.speech.v1.RecognizeResponse;
+   * import com.google.cloud.speech.v1.SpeechClient;
+   * import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1.SpeechRecognitionResult;
+   */
+
+  /**
+   * Transcribe a short audio file from Cloud Storage with multiple channels
+   *
+   * @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
+   */
+  public static void sampleRecognize(String storageUri) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // storageUri = "gs://cloud-samples-data/speech/multi.wav";
+
+      // The number of channels in the input audio file (optional)
+      int audioChannelCount = 2;
+
+      // When set to true, each audio channel will be recognized separately.
+      // The recognition result will contain a channel_tag field to state which
+      // channel that result belongs to
+      boolean enableSeparateRecognitionPerChannel = true;
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setAudioChannelCount(audioChannelCount)
+              .setEnableSeparateRecognitionPerChannel(enableSeparateRecognitionPerChannel)
+              .setLanguageCode(languageCode)
+              .build();
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(storageUri).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // channelTag to recognize which audio channel this result is for
+        System.out.printf("Channel tag: %s%n", result.getChannelTag());
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s%n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Speech recognition request failed: " + exception);
+    }
+  }
+  // [END speech_transcribe_multichannel_gcs]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("storage_uri").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String storageUri =
+        cl.getOptionValue("storage_uri", "gs://cloud-samples-data/speech/multi.wav");
+
+    sampleRecognize(storageUri);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeSync.java b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeSync.java
new file mode 100644
index 000000000..8ee11816c
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeSync.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_sync")
+// sample-metadata:
+//   title: Transcribe Audio File (Local File)
+//   description: Transcribe a short audio file using synchronous speech recognition
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1.SpeechTranscribeSync [--args='[--local_file_path "resources/brooklyn_bridge.raw"]']
+
+package com.google.cloud.examples.speech.v1;
+
+import com.google.cloud.speech.v1.RecognitionAudio;
+import com.google.cloud.speech.v1.RecognitionConfig;
+import com.google.cloud.speech.v1.RecognizeRequest;
+import com.google.cloud.speech.v1.RecognizeResponse;
+import com.google.cloud.speech.v1.SpeechClient;
+import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1.SpeechRecognitionResult;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeSync {
+  // [START speech_transcribe_sync]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1.RecognitionAudio;
+   * import com.google.cloud.speech.v1.RecognitionConfig;
+   * import com.google.cloud.speech.v1.RecognizeRequest;
+   * import com.google.cloud.speech.v1.RecognizeResponse;
+   * import com.google.cloud.speech.v1.SpeechClient;
+   * import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1.SpeechRecognitionResult;
+   * import com.google.protobuf.ByteString;
+   * import java.nio.file.Files;
+   * import java.nio.file.Path;
+   * import java.nio.file.Paths;
+   */
+
+  /**
+   * Transcribe a short audio file using synchronous speech recognition
+   *
+   * @param localFilePath Path to local audio file, e.g. /path/audio.wav
+   */
+  public static void sampleRecognize(String localFilePath) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // localFilePath = "resources/brooklyn_bridge.raw";
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+
+      // Sample rate in Hertz of the audio data sent
+      int sampleRateHertz = 16000;
+
+      // Encoding of audio data sent. This sample sets this explicitly.
+      // This field is optional for FLAC and WAV audio formats.
+      RecognitionConfig.AudioEncoding encoding = RecognitionConfig.AudioEncoding.LINEAR16;
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setLanguageCode(languageCode)
+              .setSampleRateHertz(sampleRateHertz)
+              .setEncoding(encoding)
+              .build();
+      Path path = Paths.get(localFilePath);
+      byte[] data = Files.readAllBytes(path);
+      ByteString content = ByteString.copyFrom(data);
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s%n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Speech recognition request failed: " + exception);
+    }
+  }
+  // [END speech_transcribe_sync]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("local_file_path").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String localFilePath = cl.getOptionValue("local_file_path", "resources/brooklyn_bridge.raw");
+
+    sampleRecognize(localFilePath);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeSyncGcs.java b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeSyncGcs.java
new file mode 100644
index 000000000..de8474241
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/SpeechTranscribeSyncGcs.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_sync_gcs")
+// sample-metadata:
+//   title: Transcribe Audio File (Cloud Storage)
+//   description: Transcribe short audio file from Cloud Storage using synchronous speech recognition
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1.SpeechTranscribeSyncGcs [--args='[--storage_uri "gs://cloud-samples-data/speech/brooklyn_bridge.raw"]']
+
+package com.google.cloud.examples.speech.v1;
+
+import com.google.cloud.speech.v1.RecognitionAudio;
+import com.google.cloud.speech.v1.RecognitionConfig;
+import com.google.cloud.speech.v1.RecognizeRequest;
+import com.google.cloud.speech.v1.RecognizeResponse;
+import com.google.cloud.speech.v1.SpeechClient;
+import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1.SpeechRecognitionResult;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeSyncGcs {
+  // [START speech_transcribe_sync_gcs]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1.RecognitionAudio;
+   * import com.google.cloud.speech.v1.RecognitionConfig;
+   * import com.google.cloud.speech.v1.RecognizeRequest;
+   * import com.google.cloud.speech.v1.RecognizeResponse;
+   * import com.google.cloud.speech.v1.SpeechClient;
+   * import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1.SpeechRecognitionResult;
+   */
+
+  /**
+   * Transcribe short audio file from Cloud Storage using synchronous speech recognition
+   *
+   * @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
+   */
+  public static void sampleRecognize(String storageUri) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.raw";
+
+      // Sample rate in Hertz of the audio data sent
+      int sampleRateHertz = 16000;
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+
+      // Encoding of audio data sent. This sample sets this explicitly.
+      // This field is optional for FLAC and WAV audio formats.
+      RecognitionConfig.AudioEncoding encoding = RecognitionConfig.AudioEncoding.LINEAR16;
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setSampleRateHertz(sampleRateHertz)
+              .setLanguageCode(languageCode)
+              .setEncoding(encoding)
+              .build();
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(storageUri).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s%n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Speech recognition request failed: " + exception);
+    }
+  }
+  // [END speech_transcribe_sync_gcs]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("storage_uri").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String storageUri =
+        cl.getOptionValue("storage_uri", "gs://cloud-samples-data/speech/brooklyn_bridge.raw");
+
+    sampleRecognize(storageUri);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1/speech.java.20191017.083206.manifest.yaml b/samples/src/main/java/com/google/cloud/examples/speech/v1/speech.java.20191017.083206.manifest.yaml
new file mode 100644
index 000000000..9a823d381
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1/speech.java.20191017.083206.manifest.yaml
@@ -0,0 +1,60 @@
+---
+type: manifest/samples
+schema_version: 3
+java: &java
+  environment: java
+  bin: mvn exec:java
+  base_path: samples/src/main/java/com/google/cloud/examples/speech/v1
+  package: com.google.cloud.examples.speech.v1
+  invocation: {bin} -Dexec.mainClass={class} -Dexec.args='@args'
+samples:
+- <<: *java
+  sample: "speech_transcribe_model_selection_gcs"
+  path: "{base_path}/SpeechTranscribeModelSelectionGcs.java"
+  class: {package}.SpeechTranscribeModelSelectionGcs
+  region_tag: "speech_transcribe_model_selection_gcs"
+- <<: *java
+  sample: "speech_transcribe_model_selection"
+  path: "{base_path}/SpeechTranscribeModelSelection.java"
+  class: {package}.SpeechTranscribeModelSelection
+  region_tag: "speech_transcribe_model_selection"
+- <<: *java
+  sample: "speech_transcribe_multichannel_gcs"
+  path: "{base_path}/SpeechTranscribeMultichannelGcs.java"
+  class: {package}.SpeechTranscribeMultichannelGcs
+  region_tag: "speech_transcribe_multichannel_gcs"
+- <<: *java
+  sample: "speech_transcribe_sync_gcs"
+  path: "{base_path}/SpeechTranscribeSyncGcs.java"
+  class: {package}.SpeechTranscribeSyncGcs
+  region_tag: "speech_transcribe_sync_gcs"
+- <<: *java
+  sample: "speech_transcribe_enhanced_model"
+  path: "{base_path}/SpeechTranscribeEnhancedModel.java"
+  class: {package}.SpeechTranscribeEnhancedModel
+  region_tag: "speech_transcribe_enhanced_model"
+- <<: *java
+  sample: "speech_transcribe_multichannel"
+  path: "{base_path}/SpeechTranscribeMultichannel.java"
+  class: {package}.SpeechTranscribeMultichannel
+  region_tag: "speech_transcribe_multichannel"
+- <<: *java
+  sample: "speech_transcribe_sync"
+  path: "{base_path}/SpeechTranscribeSync.java"
+  class: {package}.SpeechTranscribeSync
+  region_tag: "speech_transcribe_sync"
+- <<: *java
+  sample: "speech_transcribe_async"
+  path: "{base_path}/SpeechTranscribeAsync.java"
+  class: {package}.SpeechTranscribeAsync
+  region_tag: "speech_transcribe_async"
+- <<: *java
+  sample: "speech_transcribe_async_gcs"
+  path: "{base_path}/SpeechTranscribeAsyncGcs.java"
+  class: {package}.SpeechTranscribeAsyncGcs
+  region_tag: "speech_transcribe_async_gcs"
+- <<: *java
+  sample: "speech_transcribe_async_word_time_offsets_gcs"
+  path: "{base_path}/SpeechTranscribeAsyncWordTimeOffsetsGcs.java"
+  class: {package}.SpeechTranscribeAsyncWordTimeOffsetsGcs
+  region_tag: "speech_transcribe_async_word_time_offsets_gcs"
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechAdaptationBeta.java b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechAdaptationBeta.java
new file mode 100644
index 000000000..d137ae43c
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechAdaptationBeta.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_adaptation_beta")
+// sample-metadata:
+//   title: Speech Adaptation (Cloud Storage)
+//   description: Transcribe a short audio file with speech adaptation.
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1p1beta1.SpeechAdaptationBeta [--args='[--storage_uri "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"] [--phrase "Brooklyn Bridge"]']
+
+package com.google.cloud.examples.speech.v1p1beta1;
+
+import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+import com.google.cloud.speech.v1p1beta1.SpeechClient;
+import com.google.cloud.speech.v1p1beta1.SpeechContext;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechAdaptationBeta {
+  // [START speech_adaptation_beta]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+   * import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+   * import com.google.cloud.speech.v1p1beta1.SpeechClient;
+   * import com.google.cloud.speech.v1p1beta1.SpeechContext;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+   * import java.util.Arrays;
+   * import java.util.List;
+   */
+
+  /**
+   * Transcribe a short audio file with speech adaptation.
+   *
+   * @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
+   * @param phrase Phrase "hints" help recognize the specified phrases from your audio.
+   */
+  public static void sampleRecognize(String storageUri, String phrase) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3";
+      // phrase = "Brooklyn Bridge";
+      List<String> phrases = Arrays.asList(phrase);
+
+      // Hint Boost. This value increases the probability that a specific
+      // phrase will be recognized over other similar sounding phrases.
+      // The higher the boost, the higher the chance of false positive
+      // recognition as well. Can accept wide range of positive values.
+      // Most use cases are best served with values between 0 and 20.
+      // Using a binary search approach may help you find the optimal value.
+      float boost = 20.0F;
+      SpeechContext speechContextsElement =
+          SpeechContext.newBuilder().addAllPhrases(phrases).setBoost(boost).build();
+      List<SpeechContext> speechContexts = Arrays.asList(speechContextsElement);
+
+      // Sample rate in Hertz of the audio data sent
+      int sampleRateHertz = 44100;
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+
+      // Encoding of audio data sent. This sample sets this explicitly.
+      // This field is optional for FLAC and WAV audio formats.
+      RecognitionConfig.AudioEncoding encoding = RecognitionConfig.AudioEncoding.MP3;
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .addAllSpeechContexts(speechContexts)
+              .setSampleRateHertz(sampleRateHertz)
+              .setLanguageCode(languageCode)
+              .setEncoding(encoding)
+              .build();
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(storageUri).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s%n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Speech recognition request failed: " + exception);
+    }
+  }
+  // [END speech_adaptation_beta]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("storage_uri").build());
+    options.addOption(Option.builder("").required(false).hasArg(true).longOpt("phrase").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String storageUri =
+        cl.getOptionValue("storage_uri", "gs://cloud-samples-data/speech/brooklyn_bridge.mp3");
+    String phrase = cl.getOptionValue("phrase", "Brooklyn Bridge");
+
+    sampleRecognize(storageUri, phrase);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechContextsClassesBeta.java b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechContextsClassesBeta.java
new file mode 100644
index 000000000..670cfff6c
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechContextsClassesBeta.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_contexts_classes_beta")
+// sample-metadata:
+//   title: Using Context Classes (Cloud Storage)
+//   description: Transcribe a short audio file with static context classes.
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1p1beta1.SpeechContextsClassesBeta [--args='[--storage_uri "gs://cloud-samples-data/speech/time.mp3"] [--phrase "$TIME"]']
+
+package com.google.cloud.examples.speech.v1p1beta1;
+
+import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+import com.google.cloud.speech.v1p1beta1.SpeechClient;
+import com.google.cloud.speech.v1p1beta1.SpeechContext;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechContextsClassesBeta {
+  // [START speech_contexts_classes_beta]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+   * import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+   * import com.google.cloud.speech.v1p1beta1.SpeechClient;
+   * import com.google.cloud.speech.v1p1beta1.SpeechContext;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+   * import java.util.Arrays;
+   * import java.util.List;
+   */
+
+  /**
+   * Transcribe a short audio file with static context classes.
+   *
+   * @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
+   * @param phrase Phrase "hints" help recognize the specified phrases from your audio. In this
+   *     sample we are using a static class phrase ($TIME). Classes represent groups of words that
+   *     represent common concepts that occur in natural language.
+   */
+  public static void sampleRecognize(String storageUri, String phrase) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // storageUri = "gs://cloud-samples-data/speech/time.mp3";
+      // phrase = "$TIME";
+      List<String> phrases = Arrays.asList(phrase);
+      SpeechContext speechContextsElement =
+          SpeechContext.newBuilder().addAllPhrases(phrases).build();
+      List<SpeechContext> speechContexts = Arrays.asList(speechContextsElement);
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+
+      // Sample rate in Hertz of the audio data sent
+      int sampleRateHertz = 24000;
+
+      // Encoding of audio data sent. This sample sets this explicitly.
+      // This field is optional for FLAC and WAV audio formats.
+      RecognitionConfig.AudioEncoding encoding = RecognitionConfig.AudioEncoding.MP3;
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .addAllSpeechContexts(speechContexts)
+              .setLanguageCode(languageCode)
+              .setSampleRateHertz(sampleRateHertz)
+              .setEncoding(encoding)
+              .build();
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(storageUri).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to create the client due to: " + exception);
+    }
+  }
+  // [END speech_contexts_classes_beta]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("storage_uri").build());
+    options.addOption(Option.builder("").required(false).hasArg(true).longOpt("phrase").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String storageUri = cl.getOptionValue("storage_uri", "gs://cloud-samples-data/speech/time.mp3");
+    String phrase = cl.getOptionValue("phrase", "$TIME");
+
+    sampleRecognize(storageUri, phrase);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechQuickstartBeta.java b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechQuickstartBeta.java
new file mode 100644
index 000000000..6b35b0ada
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechQuickstartBeta.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_quickstart_beta")
+// sample-metadata:
+//   title: Quickstart Beta
+//   description: Performs synchronous speech recognition on an audio file
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1p1beta1.SpeechQuickstartBeta [--args='[--storage_uri "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"]']
+
+package com.google.cloud.examples.speech.v1p1beta1;
+
+import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+import com.google.cloud.speech.v1p1beta1.SpeechClient;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechQuickstartBeta {
+  // [START speech_quickstart_beta]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+   * import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+   * import com.google.cloud.speech.v1p1beta1.SpeechClient;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+   */
+
+  /**
+   * Performs synchronous speech recognition on an audio file stored in Cloud Storage.
+   *
+   * @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
+   */
+  public static void sampleRecognize(String storageUri) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3";
+
+      // The language of the supplied audio ("en-US" = US English)
+      String languageCode = "en-US";
+
+      // Sample rate in Hertz of the audio data sent (44.1 kHz here)
+      int sampleRateHertz = 44100;
+
+      // Encoding of audio data sent. This sample sets this explicitly.
+      // This field is optional for FLAC and WAV audio formats.
+      RecognitionConfig.AudioEncoding encoding = RecognitionConfig.AudioEncoding.MP3;
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setLanguageCode(languageCode)
+              .setSampleRateHertz(sampleRateHertz)
+              .setEncoding(encoding)
+              .build();
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(storageUri).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to create the client due to: " + exception);
+    }
+  }
+  // [END speech_quickstart_beta]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("storage_uri").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String storageUri =
+        cl.getOptionValue("storage_uri", "gs://cloud-samples-data/speech/brooklyn_bridge.mp3");
+
+    sampleRecognize(storageUri);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeAutoPunctuationBeta.java b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeAutoPunctuationBeta.java
new file mode 100644
index 000000000..3c0cbb82b
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeAutoPunctuationBeta.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_auto_punctuation_beta")
+// sample-metadata:
+//   title: Getting punctuation in results (Local File) (Beta)
+//   description: Transcribe a short audio file with punctuation
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1p1beta1.SpeechTranscribeAutoPunctuationBeta [--args='[--local_file_path "resources/commercial_mono.wav"]']
+
+package com.google.cloud.examples.speech.v1p1beta1;
+
+import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+import com.google.cloud.speech.v1p1beta1.SpeechClient;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeAutoPunctuationBeta {
+  // [START speech_transcribe_auto_punctuation_beta]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+   * import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+   * import com.google.cloud.speech.v1p1beta1.SpeechClient;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+   * import com.google.protobuf.ByteString;
+   * import java.nio.file.Files;
+   * import java.nio.file.Path;
+   * import java.nio.file.Paths;
+   */
+
+  /**
+   * Transcribe a short audio file with punctuation
+   *
+   * @param localFilePath Path to local audio file, e.g. /path/audio.wav
+   */
+  public static void sampleRecognize(String localFilePath) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // localFilePath = "resources/commercial_mono.wav";
+
+      // When enabled, transcription results may include punctuation
+      // (available for select languages).
+      boolean enableAutomaticPunctuation = true;
+
+      // The language of the supplied audio ("en-US" = US English).
+      // Automatic punctuation is available only for select languages.
+      String languageCode = "en-US";
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setEnableAutomaticPunctuation(enableAutomaticPunctuation)
+              .setLanguageCode(languageCode)
+              .build();
+      Path path = Paths.get(localFilePath);
+      byte[] data = Files.readAllBytes(path);
+      ByteString content = ByteString.copyFrom(data);
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to create the client due to: " + exception);
+    }
+  }
+  // [END speech_transcribe_auto_punctuation_beta]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("local_file_path").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String localFilePath = cl.getOptionValue("local_file_path", "resources/commercial_mono.wav");
+
+    sampleRecognize(localFilePath);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeDiarizationBeta.java b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeDiarizationBeta.java
new file mode 100644
index 000000000..efd87cf87
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeDiarizationBeta.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("LongRunningRequestAsync",  "speech_transcribe_diarization_beta")
+// sample-metadata:
+//   title: Separating different speakers (Local File) (LRO) (Beta)
+//   description: Transcribe a short audio file, separating the different speakers
+//     in the recording and tagging each word with a speaker tag
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1p1beta1.SpeechTranscribeDiarizationBeta [--args='[--local_file_path "resources/commercial_mono.wav"]']
+
+package com.google.cloud.examples.speech.v1p1beta1;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata;
+import com.google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest;
+import com.google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse;
+import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+import com.google.cloud.speech.v1p1beta1.SpeechClient;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+import com.google.cloud.speech.v1p1beta1.WordInfo;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeDiarizationBeta {
+  // [START speech_transcribe_diarization_beta]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.api.gax.longrunning.OperationFuture;
+   * import com.google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata;
+   * import com.google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest;
+   * import com.google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse;
+   * import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+   * import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+   * import com.google.cloud.speech.v1p1beta1.SpeechClient;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+   * import com.google.cloud.speech.v1p1beta1.WordInfo;
+   * import com.google.protobuf.ByteString;
+   * import java.nio.file.Files;
+   * import java.nio.file.Path;
+   * import java.nio.file.Paths;
+   */
+
+  /**
+   * Transcribes a short local audio file, separating the different speakers and
+   * tagging each word of the first alternative with a speaker tag.
+   *
+   * @param localFilePath Path to local audio file, e.g. /path/audio.wav
+   */
+  public static void sampleLongRunningRecognize(String localFilePath) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // localFilePath = "resources/commercial_mono.wav";
+
+      // If enabled, each word in the first alternative of each result will be
+      // tagged with a speaker tag to identify the speaker.
+      boolean enableSpeakerDiarization = true;
+
+      // Optional. Specifies the estimated number of speakers in the conversation.
+      int diarizationSpeakerCount = 2;
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setEnableSpeakerDiarization(enableSpeakerDiarization)
+              .setDiarizationSpeakerCount(diarizationSpeakerCount)
+              .setLanguageCode(languageCode)
+              .build();
+      Path path = Paths.get(localFilePath);
+      byte[] data = Files.readAllBytes(path);
+      ByteString content = ByteString.copyFrom(data);
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
+      LongRunningRecognizeRequest request =
+          LongRunningRecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
+          speechClient.longRunningRecognizeAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      LongRunningRecognizeResponse response = future.get();
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative has words tagged with speakers
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s\n", alternative.getTranscript());
+        // Print the speakerTag of each word
+        for (WordInfo word : alternative.getWordsList()) {
+          System.out.printf("Word: %s\n", word.getWord());
+          System.out.printf("Speaker tag: %s\n", word.getSpeakerTag());
+        }
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to create the client due to: " + exception);
+    }
+  }
+  // [END speech_transcribe_diarization_beta]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("local_file_path").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String localFilePath = cl.getOptionValue("local_file_path", "resources/commercial_mono.wav");
+
+    sampleLongRunningRecognize(localFilePath);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeMultilanguageBeta.java b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeMultilanguageBeta.java
new file mode 100644
index 000000000..05a994de4
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeMultilanguageBeta.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_multilanguage_beta")
+// sample-metadata:
+//   title: Detecting language spoken automatically (Local File) (Beta)
+//   description: Transcribe a short audio file with language detected from a list of possible languages
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1p1beta1.SpeechTranscribeMultilanguageBeta [--args='[--local_file_path "resources/brooklyn_bridge.flac"]']
+
+package com.google.cloud.examples.speech.v1p1beta1;
+
+import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+import com.google.cloud.speech.v1p1beta1.SpeechClient;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeMultilanguageBeta {
+  // [START speech_transcribe_multilanguage_beta]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+   * import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+   * import com.google.cloud.speech.v1p1beta1.SpeechClient;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+   * import com.google.protobuf.ByteString;
+   * import java.nio.file.Files;
+   * import java.nio.file.Path;
+   * import java.nio.file.Paths;
+   * import java.util.Arrays;
+   * import java.util.List;
+   */
+
+  /**
+   * Transcribe a short audio file with language detected from a list of possible languages
+   *
+   * @param localFilePath Path to local audio file, e.g. /path/audio.wav
+   */
+  public static void sampleRecognize(String localFilePath) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // localFilePath = "resources/brooklyn_bridge.flac";
+
+      // The language of the supplied audio. Even though additional languages are
+      // provided by alternative_language_codes, a primary language is still required.
+      String languageCode = "fr";
+
+      // Specify up to 3 additional languages as possible alternative languages
+      // of the supplied audio.
+      String alternativeLanguageCodesElement = "es";
+      String alternativeLanguageCodesElement2 = "en";
+      List<String> alternativeLanguageCodes =
+          Arrays.asList(alternativeLanguageCodesElement, alternativeLanguageCodesElement2);
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setLanguageCode(languageCode)
+              .addAllAlternativeLanguageCodes(alternativeLanguageCodes)
+              .build();
+      Path path = Paths.get(localFilePath);
+      byte[] data = Files.readAllBytes(path);
+      ByteString content = ByteString.copyFrom(data);
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // The languageCode which was detected as the most likely being spoken in the audio
+        System.out.printf("Detected language: %s\n", result.getLanguageCode());
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to create the client due to: " + exception);
+    }
+  }
+  // [END speech_transcribe_multilanguage_beta]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("local_file_path").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String localFilePath = cl.getOptionValue("local_file_path", "resources/brooklyn_bridge.flac");
+
+    sampleRecognize(localFilePath);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeRecognitionMetadataBeta.java b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeRecognitionMetadataBeta.java
new file mode 100644
index 000000000..732f4cb97
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeRecognitionMetadataBeta.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_recognition_metadata_beta")
+// sample-metadata:
+//   title: Adding recognition metadata (Local File) (Beta)
+//   description: Adds additional metadata about the short audio file included in this recognition request
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1p1beta1.SpeechTranscribeRecognitionMetadataBeta [--args='[--local_file_path "resources/commercial_mono.wav"]']
+
+package com.google.cloud.examples.speech.v1p1beta1;
+
+import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+import com.google.cloud.speech.v1p1beta1.RecognitionMetadata;
+import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+import com.google.cloud.speech.v1p1beta1.SpeechClient;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeRecognitionMetadataBeta {
+  // [START speech_transcribe_recognition_metadata_beta]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+   * import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+   * import com.google.cloud.speech.v1p1beta1.RecognitionMetadata;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+   * import com.google.cloud.speech.v1p1beta1.SpeechClient;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+   * import com.google.protobuf.ByteString;
+   * import java.nio.file.Files;
+   * import java.nio.file.Path;
+   * import java.nio.file.Paths;
+   */
+
+  /**
+   * Adds recognition metadata about the short audio file included in this recognition request
+   *
+   * @param localFilePath Path to local audio file, e.g. /path/audio.wav
+   */
+  public static void sampleRecognize(String localFilePath) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // localFilePath = "resources/commercial_mono.wav";
+
+      // The use case of the audio, e.g. PHONE_CALL, DISCUSSION, PRESENTATION, et al.
+      RecognitionMetadata.InteractionType interactionType =
+          RecognitionMetadata.InteractionType.VOICE_SEARCH;
+
+      // The kind of device used to capture the audio
+      RecognitionMetadata.RecordingDeviceType recordingDeviceType =
+          RecognitionMetadata.RecordingDeviceType.SMARTPHONE;
+
+      // The device used to make the recording.
+      // Arbitrary string, e.g. 'Pixel XL', 'VoIP', 'Cardioid Microphone', or other value.
+      String recordingDeviceName = "Pixel 3";
+      RecognitionMetadata metadata =
+          RecognitionMetadata.newBuilder()
+              .setInteractionType(interactionType)
+              .setRecordingDeviceType(recordingDeviceType)
+              .setRecordingDeviceName(recordingDeviceName)
+              .build();
+
+      // The language of the supplied audio ("en-US" = US English).
+      // A primary language is always required for a recognition request.
+      String languageCode = "en-US";
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setMetadata(metadata)
+              .setLanguageCode(languageCode)
+              .build();
+      Path path = Paths.get(localFilePath);
+      byte[] data = Files.readAllBytes(path);
+      ByteString content = ByteString.copyFrom(data);
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      for (SpeechRecognitionResult result : response.getResultsList()) {
+        // First alternative is the most probable result
+        SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+        System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to create the client due to: " + exception);
+    }
+  }
+  // [END speech_transcribe_recognition_metadata_beta]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("local_file_path").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String localFilePath = cl.getOptionValue("local_file_path", "resources/commercial_mono.wav");
+
+    sampleRecognize(localFilePath);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeWordLevelConfidenceBeta.java b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeWordLevelConfidenceBeta.java
new file mode 100644
index 000000000..979565c5b
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/SpeechTranscribeWordLevelConfidenceBeta.java
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// DO NOT EDIT! This is a generated sample ("Request",  "speech_transcribe_word_level_confidence_beta")
+// sample-metadata:
+//   title: Enabling word-level confidence (Local File) (Beta)
+//   description: Print confidence level for individual words in a transcription of a short audio file.
+//   usage: gradle run -PmainClass=com.google.cloud.examples.speech.v1p1beta1.SpeechTranscribeWordLevelConfidenceBeta [--args='[--local_file_path "resources/brooklyn_bridge.flac"]']
+
+package com.google.cloud.examples.speech.v1p1beta1;
+
+import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+import com.google.cloud.speech.v1p1beta1.SpeechClient;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+import com.google.cloud.speech.v1p1beta1.WordInfo;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+public class SpeechTranscribeWordLevelConfidenceBeta {
+  // [START speech_transcribe_word_level_confidence_beta]
+  /*
+   * Please include the following imports to run this sample.
+   *
+   * import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
+   * import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeRequest;
+   * import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
+   * import com.google.cloud.speech.v1p1beta1.SpeechClient;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative;
+   * import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
+   * import com.google.cloud.speech.v1p1beta1.WordInfo;
+   * import com.google.protobuf.ByteString;
+   * import java.nio.file.Files;
+   * import java.nio.file.Path;
+   * import java.nio.file.Paths;
+   */
+
+  /**
+   * Print confidence level for individual words in a transcription of a short audio file.
+   *
+   * @param localFilePath Path to local audio file, e.g. /path/audio.wav
+   */
+  public static void sampleRecognize(String localFilePath) {
+    try (SpeechClient speechClient = SpeechClient.create()) {
+      // localFilePath = "resources/brooklyn_bridge.flac";
+
+      // When enabled, the first result returned by the API will include a list
+      // of words and the confidence level for each of those words.
+      boolean enableWordConfidence = true;
+
+      // The language of the supplied audio
+      String languageCode = "en-US";
+      RecognitionConfig config =
+          RecognitionConfig.newBuilder()
+              .setEnableWordConfidence(enableWordConfidence)
+              .setLanguageCode(languageCode)
+              .build();
+      Path path = Paths.get(localFilePath);
+      byte[] data = Files.readAllBytes(path);
+      ByteString content = ByteString.copyFrom(data);
+      RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(content).build();
+      RecognizeRequest request =
+          RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
+      RecognizeResponse response = speechClient.recognize(request);
+      if (response.getResultsList().isEmpty()) {
+        // Silent/short audio or a mismatched encoding can yield zero results;
+        // bail out early so the unchecked get(0) calls below cannot throw.
+        System.out.println("No transcription results returned.");
+        return;
+      }
+      // The first result includes confidence levels per word
+      SpeechRecognitionResult result = response.getResultsList().get(0);
+      // First alternative is the most probable result
+      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
+      System.out.printf("Transcript: %s\n", alternative.getTranscript());
+      // Print the confidence level of each word
+      for (WordInfo word : alternative.getWordsList()) {
+        System.out.printf("Word: %s\n", word.getWord());
+        System.out.printf("Confidence: %s\n", word.getConfidence());
+      }
+    } catch (Exception exception) {
+      System.err.println("Failed to create the client due to: " + exception);
+    }
+  }
+  // [END speech_transcribe_word_level_confidence_beta]
+
+  public static void main(String[] args) throws Exception {
+    Options options = new Options();
+    options.addOption(
+        Option.builder("").required(false).hasArg(true).longOpt("local_file_path").build());
+
+    CommandLine cl = (new DefaultParser()).parse(options, args);
+    String localFilePath = cl.getOptionValue("local_file_path", "resources/brooklyn_bridge.flac");
+
+    sampleRecognize(localFilePath);
+  }
+}
diff --git a/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/speech.java.20191017.083221.manifest.yaml b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/speech.java.20191017.083221.manifest.yaml
new file mode 100644
index 000000000..cf1d363d5
--- /dev/null
+++ b/samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1/speech.java.20191017.083221.manifest.yaml
@@ -0,0 +1,50 @@
+---
+type: manifest/samples
+schema_version: 3
+java: &java
+  environment: java
+  bin: mvn exec:java
+  base_path: samples/src/main/java/com/google/cloud/examples/speech/v1p1beta1
+  package: com.google.cloud.examples.speech.v1p1beta1
+  invocation: {bin} -Dexec.mainClass={class} -Dexec.args='@args'
+samples:
+- <<: *java
+  sample: "speech_contexts_classes_beta"
+  path: "{base_path}/SpeechContextsClassesBeta.java"
+  class: {package}.SpeechContextsClassesBeta
+  region_tag: "speech_contexts_classes_beta"
+- <<: *java
+  sample: "speech_transcribe_auto_punctuation_beta"
+  path: "{base_path}/SpeechTranscribeAutoPunctuationBeta.java"
+  class: {package}.SpeechTranscribeAutoPunctuationBeta
+  region_tag: "speech_transcribe_auto_punctuation_beta"
+- <<: *java
+  sample: "speech_quickstart_beta"
+  path: "{base_path}/SpeechQuickstartBeta.java"
+  class: {package}.SpeechQuickstartBeta
+  region_tag: "speech_quickstart_beta"
+- <<: *java
+  sample: "speech_transcribe_word_level_confidence_beta"
+  path: "{base_path}/SpeechTranscribeWordLevelConfidenceBeta.java"
+  class: {package}.SpeechTranscribeWordLevelConfidenceBeta
+  region_tag: "speech_transcribe_word_level_confidence_beta"
+- <<: *java
+  sample: "speech_adaptation_beta"
+  path: "{base_path}/SpeechAdaptationBeta.java"
+  class: {package}.SpeechAdaptationBeta
+  region_tag: "speech_adaptation_beta"
+- <<: *java
+  sample: "speech_transcribe_multilanguage_beta"
+  path: "{base_path}/SpeechTranscribeMultilanguageBeta.java"
+  class: {package}.SpeechTranscribeMultilanguageBeta
+  region_tag: "speech_transcribe_multilanguage_beta"
+- <<: *java
+  sample: "speech_transcribe_recognition_metadata_beta"
+  path: "{base_path}/SpeechTranscribeRecognitionMetadataBeta.java"
+  class: {package}.SpeechTranscribeRecognitionMetadataBeta
+  region_tag: "speech_transcribe_recognition_metadata_beta"
+- <<: *java
+  sample: "speech_transcribe_diarization_beta"
+  path: "{base_path}/SpeechTranscribeDiarizationBeta.java"
+  class: {package}.SpeechTranscribeDiarizationBeta
+  region_tag: "speech_transcribe_diarization_beta"
diff --git a/synth.metadata b/synth.metadata
index 279617a89..64e135311 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -1,19 +1,26 @@
 {
-  "updateTime": "2019-09-16T21:36:13.131232Z",
+  "updateTime": "2019-10-17T20:32:36.613998Z",
   "sources": [
     {
       "generator": {
         "name": "artman",
-        "version": "0.36.2",
-        "dockerImage": "googleapis/artman@sha256:0e6f3a668cd68afc768ecbe08817cf6e56a0e64fcbdb1c58c3b97492d12418a1"
+        "version": "0.39.0",
+        "dockerImage": "googleapis/artman@sha256:72554d0b3bdc0b4ac7d6726a6a606c00c14b454339037ed86be94574fb05d9f3"
       }
     },
     {
       "git": {
         "name": "googleapis",
         "remote": "https://github.com/googleapis/googleapis.git",
-        "sha": "6b2ba2ae3124c22ecb56af7102c78110b8576671",
-        "internalRef": "268974829"
+        "sha": "a05f640453ac7b4e1361dfceeae15ee6e02317f1",
+        "internalRef": "275258873"
+      }
+    },
+    {
+      "template": {
+        "name": "java_library",
+        "origin": "synthtool.gcp",
+        "version": "2019.5.2"
       }
     }
   ],
@@ -27,6 +34,16 @@
         "generator": "gapic",
         "config": "google/cloud/speech/artman_speech_v1.yaml"
       }
+    },
+    {
+      "client": {
+        "source": "googleapis",
+        "apiName": "speech",
+        "apiVersion": "v1p1beta1",
+        "language": "java",
+        "generator": "gapic",
+        "config": "google/cloud/speech/artman_speech_v1p1beta1.yaml"
+      }
     }
   ]
 }
\ No newline at end of file