From 872d4f02731b2ba60debd40451254edf32b22709 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 8 Apr 2021 08:36:06 -0700 Subject: [PATCH] feat: Support for spoken punctuation and spoken emojis (#477) This PR was generated using Autosynth. :rainbow: Synth log will be available here: https://source.cloud.google.com/results/invocations/e42939d1-f638-4298-bb62-eb2bc9f61650/targets - [ ] To automatically regenerate this PR, check this box. (May take up to 24 hours.) PiperOrigin-RevId: 367239272 Source-Link: https://github.com/googleapis/googleapis/commit/93b078ae0decd51e618041bb337a8d592d0c998b --- .../speech/v1p1beta1/RecognitionConfig.java | 745 +++++++++++++++++- .../v1p1beta1/RecognitionConfigOrBuilder.java | 116 ++- .../cloud/speech/v1p1beta1/SpeechProto.java | 289 +++---- .../v1p1beta1/StreamingRecognizeResponse.java | 8 +- .../cloud/speech/v1p1beta1/cloud_speech.proto | 26 +- synth.metadata | 10 +- 6 files changed, 997 insertions(+), 197 deletions(-) diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java index 114d4f6db..6a6aec931 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java @@ -223,6 +223,36 @@ private RecognitionConfig( adaptation_ = subBuilder.buildPartial(); } + break; + } + case 178: + { + com.google.protobuf.BoolValue.Builder subBuilder = null; + if (enableSpokenPunctuation_ != null) { + subBuilder = enableSpokenPunctuation_.toBuilder(); + } + enableSpokenPunctuation_ = + input.readMessage(com.google.protobuf.BoolValue.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(enableSpokenPunctuation_); + enableSpokenPunctuation_ = subBuilder.buildPartial(); + } + + break; + } + case 186: + { + com.google.protobuf.BoolValue.Builder subBuilder = null; + if (enableSpokenEmojis_ != null) { + subBuilder = enableSpokenEmojis_.toBuilder(); + } + enableSpokenEmojis_ = + input.readMessage(com.google.protobuf.BoolValue.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(enableSpokenEmojis_); + enableSpokenEmojis_ = subBuilder.buildPartial(); + } + break; } default: @@ -950,7 +980,7 @@ public boolean getProfanityFilter() { * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -969,7 +999,7 @@ public boolean hasAdaptation() { * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -990,7 +1020,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechAdaptation getAdaptation() { * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. 
For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -1011,7 +1041,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechAdaptationOrBuilder getAdaptation * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -1028,7 +1058,7 @@ public java.util.List getSpeech * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -1046,7 +1076,7 @@ public java.util.List getSpeech * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -1063,7 +1093,7 @@ public int getSpeechContextsCount() { * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -1080,7 +1110,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext getSpeechContexts(int ind * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -1153,6 +1183,132 @@ public boolean getEnableAutomaticPunctuation() { return enableAutomaticPunctuation_; } + public static final int ENABLE_SPOKEN_PUNCTUATION_FIELD_NUMBER = 22; + private com.google.protobuf.BoolValue enableSpokenPunctuation_; + /** + * + * + *
+   * The spoken punctuation behavior for the call.
+   * If not set, uses the default behavior based on the model of choice;
+   * e.g. command_and_search will enable spoken punctuation by default.
+   * If 'true', replaces spoken punctuation with the corresponding symbols in
+   * the request. For example, "how are you question mark" becomes "how are
+   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+   * for support. If 'false', spoken punctuation is not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + * + * @return Whether the enableSpokenPunctuation field is set. + */ + @java.lang.Override + public boolean hasEnableSpokenPunctuation() { + return enableSpokenPunctuation_ != null; + } + /** + * + * + *
+   * The spoken punctuation behavior for the call.
+   * If not set, uses the default behavior based on the model of choice;
+   * e.g. command_and_search will enable spoken punctuation by default.
+   * If 'true', replaces spoken punctuation with the corresponding symbols in
+   * the request. For example, "how are you question mark" becomes "how are
+   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+   * for support. If 'false', spoken punctuation is not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + * + * @return The enableSpokenPunctuation. + */ + @java.lang.Override + public com.google.protobuf.BoolValue getEnableSpokenPunctuation() { + return enableSpokenPunctuation_ == null + ? com.google.protobuf.BoolValue.getDefaultInstance() + : enableSpokenPunctuation_; + } + /** + * + * + *
+   * The spoken punctuation behavior for the call.
+   * If not set, uses the default behavior based on the model of choice;
+   * e.g. command_and_search will enable spoken punctuation by default.
+   * If 'true', replaces spoken punctuation with the corresponding symbols in
+   * the request. For example, "how are you question mark" becomes "how are
+   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+   * for support. If 'false', spoken punctuation is not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + */ + @java.lang.Override + public com.google.protobuf.BoolValueOrBuilder getEnableSpokenPunctuationOrBuilder() { + return getEnableSpokenPunctuation(); + } + + public static final int ENABLE_SPOKEN_EMOJIS_FIELD_NUMBER = 23; + private com.google.protobuf.BoolValue enableSpokenEmojis_; + /** + * + * + *
+   * The spoken emoji behavior for the call.
+   * If not set, uses the default behavior based on the model of choice.
+   * If 'true', adds spoken emoji formatting for the request. This will replace
+   * spoken emojis with the corresponding Unicode symbols in the final
+   * transcript. If 'false', spoken emojis are not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + * + * @return Whether the enableSpokenEmojis field is set. + */ + @java.lang.Override + public boolean hasEnableSpokenEmojis() { + return enableSpokenEmojis_ != null; + } + /** + * + * + *
+   * The spoken emoji behavior for the call.
+   * If not set, uses the default behavior based on the model of choice.
+   * If 'true', adds spoken emoji formatting for the request. This will replace
+   * spoken emojis with the corresponding Unicode symbols in the final
+   * transcript. If 'false', spoken emojis are not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + * + * @return The enableSpokenEmojis. + */ + @java.lang.Override + public com.google.protobuf.BoolValue getEnableSpokenEmojis() { + return enableSpokenEmojis_ == null + ? com.google.protobuf.BoolValue.getDefaultInstance() + : enableSpokenEmojis_; + } + /** + * + * + *
+   * The spoken emoji behavior for the call.
+   * If not set, uses the default behavior based on the model of choice.
+   * If 'true', adds spoken emoji formatting for the request. This will replace
+   * spoken emojis with the corresponding Unicode symbols in the final
+   * transcript. If 'false', spoken emojis are not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + */ + @java.lang.Override + public com.google.protobuf.BoolValueOrBuilder getEnableSpokenEmojisOrBuilder() { + return getEnableSpokenEmojis(); + } + public static final int ENABLE_SPEAKER_DIARIZATION_FIELD_NUMBER = 16; private boolean enableSpeakerDiarization_; /** @@ -1523,6 +1679,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (adaptation_ != null) { output.writeMessage(20, getAdaptation()); } + if (enableSpokenPunctuation_ != null) { + output.writeMessage(22, getEnableSpokenPunctuation()); + } + if (enableSpokenEmojis_ != null) { + output.writeMessage(23, getEnableSpokenEmojis()); + } unknownFields.writeTo(output); } @@ -1599,6 +1761,14 @@ public int getSerializedSize() { if (adaptation_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(20, getAdaptation()); } + if (enableSpokenPunctuation_ != null) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 22, getEnableSpokenPunctuation()); + } + if (enableSpokenEmojis_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(23, getEnableSpokenEmojis()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -1633,6 +1803,14 @@ public boolean equals(final java.lang.Object obj) { if (getEnableWordTimeOffsets() != other.getEnableWordTimeOffsets()) return false; if (getEnableWordConfidence() != other.getEnableWordConfidence()) return false; if (getEnableAutomaticPunctuation() != other.getEnableAutomaticPunctuation()) return false; + if (hasEnableSpokenPunctuation() != other.hasEnableSpokenPunctuation()) return false; + if (hasEnableSpokenPunctuation()) { + if (!getEnableSpokenPunctuation().equals(other.getEnableSpokenPunctuation())) return false; + } + if (hasEnableSpokenEmojis() != other.hasEnableSpokenEmojis()) return false; + if (hasEnableSpokenEmojis()) { + if (!getEnableSpokenEmojis().equals(other.getEnableSpokenEmojis())) return false; + } if (getEnableSpeakerDiarization() != other.getEnableSpeakerDiarization()) return false; if (getDiarizationSpeakerCount() != other.getDiarizationSpeakerCount()) return false; if (hasDiarizationConfig() != other.hasDiarizationConfig()) return false; @@ -1690,6 +1868,14 @@ public int hashCode() { hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordConfidence()); hash = (37 * hash) + ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableAutomaticPunctuation()); + if (hasEnableSpokenPunctuation()) { + hash = (37 * hash) + ENABLE_SPOKEN_PUNCTUATION_FIELD_NUMBER; + hash = (53 * hash) + getEnableSpokenPunctuation().hashCode(); + } + if (hasEnableSpokenEmojis()) { + hash = (37 * hash) + ENABLE_SPOKEN_EMOJIS_FIELD_NUMBER; + hash = (53 * hash) + getEnableSpokenEmojis().hashCode(); + } hash = (37 * hash) + ENABLE_SPEAKER_DIARIZATION_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableSpeakerDiarization()); hash = (37 * hash) + DIARIZATION_SPEAKER_COUNT_FIELD_NUMBER; @@ -1888,6 +2074,18 @@ public Builder clear() { enableAutomaticPunctuation_ = false; + if (enableSpokenPunctuationBuilder_ == null) { + enableSpokenPunctuation_ = null; + } else { + enableSpokenPunctuation_ = null; + enableSpokenPunctuationBuilder_ = null; + } + if (enableSpokenEmojisBuilder_ == null) { + enableSpokenEmojis_ = null; + } else { + enableSpokenEmojis_ = null; + enableSpokenEmojisBuilder_ = null; + } enableSpeakerDiarization_ = 
false; diarizationSpeakerCount_ = 0; @@ -1965,6 +2163,16 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig buildPartial() { result.enableWordTimeOffsets_ = enableWordTimeOffsets_; result.enableWordConfidence_ = enableWordConfidence_; result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_; + if (enableSpokenPunctuationBuilder_ == null) { + result.enableSpokenPunctuation_ = enableSpokenPunctuation_; + } else { + result.enableSpokenPunctuation_ = enableSpokenPunctuationBuilder_.build(); + } + if (enableSpokenEmojisBuilder_ == null) { + result.enableSpokenEmojis_ = enableSpokenEmojis_; + } else { + result.enableSpokenEmojis_ = enableSpokenEmojisBuilder_.build(); + } result.enableSpeakerDiarization_ = enableSpeakerDiarization_; result.diarizationSpeakerCount_ = diarizationSpeakerCount_; if (diarizationConfigBuilder_ == null) { @@ -2100,6 +2308,12 @@ public Builder mergeFrom(com.google.cloud.speech.v1p1beta1.RecognitionConfig oth if (other.getEnableAutomaticPunctuation() != false) { setEnableAutomaticPunctuation(other.getEnableAutomaticPunctuation()); } + if (other.hasEnableSpokenPunctuation()) { + mergeEnableSpokenPunctuation(other.getEnableSpokenPunctuation()); + } + if (other.hasEnableSpokenEmojis()) { + mergeEnableSpokenEmojis(other.getEnableSpokenEmojis()); + } if (other.getEnableSpeakerDiarization() != false) { setEnableSpeakerDiarization(other.getEnableSpeakerDiarization()); } @@ -3008,7 +3222,7 @@ public Builder clearProfanityFilter() { * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -3026,7 +3240,7 @@ public boolean hasAdaptation() { * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -3050,7 +3264,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechAdaptation getAdaptation() { * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -3076,7 +3290,7 @@ public Builder setAdaptation(com.google.cloud.speech.v1p1beta1.SpeechAdaptation * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -3100,7 +3314,7 @@ public Builder setAdaptation( * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. 
For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -3130,7 +3344,7 @@ public Builder mergeAdaptation(com.google.cloud.speech.v1p1beta1.SpeechAdaptatio * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -3154,7 +3368,7 @@ public Builder clearAdaptation() { * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -3172,7 +3386,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechAdaptation.Builder getAdaptationB * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -3194,7 +3408,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechAdaptationOrBuilder getAdaptation * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -3243,7 +3457,7 @@ private void ensureSpeechContextsIsMutable() { * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3263,7 +3477,7 @@ public java.util.List getSpeech * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3283,7 +3497,7 @@ public int getSpeechContextsCount() { * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3303,7 +3517,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext getSpeechContexts(int ind * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). 
+ * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3330,7 +3544,7 @@ public Builder setSpeechContexts( * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3354,7 +3568,7 @@ public Builder setSpeechContexts( * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3380,7 +3594,7 @@ public Builder addSpeechContexts(com.google.cloud.speech.v1p1beta1.SpeechContext * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3407,7 +3621,7 @@ public Builder addSpeechContexts( * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3431,7 +3645,7 @@ public Builder addSpeechContexts( * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3455,7 +3669,7 @@ public Builder addSpeechContexts( * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3479,7 +3693,7 @@ public Builder addAllSpeechContexts( * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3502,7 +3716,7 @@ public Builder clearSpeechContexts() { * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3525,7 +3739,7 @@ public Builder removeSpeechContexts(int index) { * A means to provide context to assist the speech recognition. 
For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3542,7 +3756,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder getSpeechContexts * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3563,7 +3777,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder getSpeechContext * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3584,7 +3798,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder getSpeechContext * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3601,7 +3815,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder addSpeechContexts * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3619,7 +3833,7 @@ public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder addSpeechContexts * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -3829,6 +4043,467 @@ public Builder clearEnableAutomaticPunctuation() { return this; } + private com.google.protobuf.BoolValue enableSpokenPunctuation_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.BoolValue, + com.google.protobuf.BoolValue.Builder, + com.google.protobuf.BoolValueOrBuilder> + enableSpokenPunctuationBuilder_; + /** + * + * + *
+     * The spoken punctuation behavior for the call.
+     * If not set, uses the default behavior based on the model of choice;
+     * e.g. command_and_search will enable spoken punctuation by default.
+     * If 'true', replaces spoken punctuation with the corresponding symbols in
+     * the request. For example, "how are you question mark" becomes "how are
+     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+     * for support. If 'false', spoken punctuation is not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + * + * @return Whether the enableSpokenPunctuation field is set. + */ + public boolean hasEnableSpokenPunctuation() { + return enableSpokenPunctuationBuilder_ != null || enableSpokenPunctuation_ != null; + } + /** + * + * + *
+     * The spoken punctuation behavior for the call.
+     * If not set, uses the default behavior based on the model of choice;
+     * e.g. command_and_search will enable spoken punctuation by default.
+     * If 'true', replaces spoken punctuation with the corresponding symbols in
+     * the request. For example, "how are you question mark" becomes "how are
+     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+     * for support. If 'false', spoken punctuation is not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + * + * @return The enableSpokenPunctuation. + */ + public com.google.protobuf.BoolValue getEnableSpokenPunctuation() { + if (enableSpokenPunctuationBuilder_ == null) { + return enableSpokenPunctuation_ == null + ? com.google.protobuf.BoolValue.getDefaultInstance() + : enableSpokenPunctuation_; + } else { + return enableSpokenPunctuationBuilder_.getMessage(); + } + } + /** + * + * + *
+     * The spoken punctuation behavior for the call.
+     * If not set, uses the default behavior based on the model of choice;
+     * e.g. command_and_search will enable spoken punctuation by default.
+     * If 'true', replaces spoken punctuation with the corresponding symbols in
+     * the request. For example, "how are you question mark" becomes "how are
+     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+     * for support. If 'false', spoken punctuation is not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + */ + public Builder setEnableSpokenPunctuation(com.google.protobuf.BoolValue value) { + if (enableSpokenPunctuationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + enableSpokenPunctuation_ = value; + onChanged(); + } else { + enableSpokenPunctuationBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * The spoken punctuation behavior for the call.
+     * If not set, uses the default behavior based on the model of choice;
+     * e.g. command_and_search will enable spoken punctuation by default.
+     * If 'true', replaces spoken punctuation with the corresponding symbols in
+     * the request. For example, "how are you question mark" becomes "how are
+     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+     * for support. If 'false', spoken punctuation is not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + */ + public Builder setEnableSpokenPunctuation( + com.google.protobuf.BoolValue.Builder builderForValue) { + if (enableSpokenPunctuationBuilder_ == null) { + enableSpokenPunctuation_ = builderForValue.build(); + onChanged(); + } else { + enableSpokenPunctuationBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * The spoken punctuation behavior for the call.
+     * If not set, uses the default behavior based on the model of choice;
+     * e.g. command_and_search will enable spoken punctuation by default.
+     * If 'true', replaces spoken punctuation with the corresponding symbols in
+     * the request. For example, "how are you question mark" becomes "how are
+     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+     * for support. If 'false', spoken punctuation is not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + */ + public Builder mergeEnableSpokenPunctuation(com.google.protobuf.BoolValue value) { + if (enableSpokenPunctuationBuilder_ == null) { + if (enableSpokenPunctuation_ != null) { + enableSpokenPunctuation_ = + com.google.protobuf.BoolValue.newBuilder(enableSpokenPunctuation_) + .mergeFrom(value) + .buildPartial(); + } else { + enableSpokenPunctuation_ = value; + } + onChanged(); + } else { + enableSpokenPunctuationBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * The spoken punctuation behavior for the call.
+     * If not set, uses the default behavior based on the model of choice;
+     * e.g. command_and_search will enable spoken punctuation by default.
+     * If 'true', replaces spoken punctuation with the corresponding symbols in
+     * the request. For example, "how are you question mark" becomes "how are
+     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+     * for support. If 'false', spoken punctuation is not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + */ + public Builder clearEnableSpokenPunctuation() { + if (enableSpokenPunctuationBuilder_ == null) { + enableSpokenPunctuation_ = null; + onChanged(); + } else { + enableSpokenPunctuation_ = null; + enableSpokenPunctuationBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * The spoken punctuation behavior for the call.
+     * If not set, uses the default behavior based on the model of choice;
+     * e.g. command_and_search will enable spoken punctuation by default.
+     * If 'true', replaces spoken punctuation with the corresponding symbols in
+     * the request. For example, "how are you question mark" becomes "how are
+     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+     * for support. If 'false', spoken punctuation is not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + */ + public com.google.protobuf.BoolValue.Builder getEnableSpokenPunctuationBuilder() { + + onChanged(); + return getEnableSpokenPunctuationFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * The spoken punctuation behavior for the call.
+     * If not set, uses the default behavior based on the model of choice;
+     * e.g. command_and_search will enable spoken punctuation by default.
+     * If 'true', replaces spoken punctuation with the corresponding symbols in
+     * the request. For example, "how are you question mark" becomes "how are
+     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+     * for support. If 'false', spoken punctuation is not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + */ + public com.google.protobuf.BoolValueOrBuilder getEnableSpokenPunctuationOrBuilder() { + if (enableSpokenPunctuationBuilder_ != null) { + return enableSpokenPunctuationBuilder_.getMessageOrBuilder(); + } else { + return enableSpokenPunctuation_ == null + ? com.google.protobuf.BoolValue.getDefaultInstance() + : enableSpokenPunctuation_; + } + } + /** + * + * + *
+     * The spoken punctuation behavior for the call.
+     * If not set, uses the default behavior based on the model of choice;
+     * e.g. command_and_search will enable spoken punctuation by default.
+     * If 'true', replaces spoken punctuation with the corresponding symbols in
+     * the request. For example, "how are you question mark" becomes "how are
+     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+     * for support. If 'false', spoken punctuation is not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.BoolValue, + com.google.protobuf.BoolValue.Builder, + com.google.protobuf.BoolValueOrBuilder> + getEnableSpokenPunctuationFieldBuilder() { + if (enableSpokenPunctuationBuilder_ == null) { + enableSpokenPunctuationBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.BoolValue, + com.google.protobuf.BoolValue.Builder, + com.google.protobuf.BoolValueOrBuilder>( + getEnableSpokenPunctuation(), getParentForChildren(), isClean()); + enableSpokenPunctuation_ = null; + } + return enableSpokenPunctuationBuilder_; + } + + private com.google.protobuf.BoolValue enableSpokenEmojis_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.BoolValue, + com.google.protobuf.BoolValue.Builder, + com.google.protobuf.BoolValueOrBuilder> + enableSpokenEmojisBuilder_; + /** + * + * + *
+     * The spoken emoji behavior for the call.
+     * If not set, uses the default behavior based on the model of choice.
+     * If 'true', adds spoken emoji formatting for the request. This will replace
+     * spoken emojis with the corresponding Unicode symbols in the final
+     * transcript. If 'false', spoken emojis are not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + * + * @return Whether the enableSpokenEmojis field is set. + */ + public boolean hasEnableSpokenEmojis() { + return enableSpokenEmojisBuilder_ != null || enableSpokenEmojis_ != null; + } + /** + * + * + *
+     * The spoken emoji behavior for the call.
+     * If not set, uses the default behavior based on the model of choice.
+     * If 'true', adds spoken emoji formatting for the request. This will replace
+     * spoken emojis with the corresponding Unicode symbols in the final
+     * transcript. If 'false', spoken emojis are not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + * + * @return The enableSpokenEmojis. + */ + public com.google.protobuf.BoolValue getEnableSpokenEmojis() { + if (enableSpokenEmojisBuilder_ == null) { + return enableSpokenEmojis_ == null + ? com.google.protobuf.BoolValue.getDefaultInstance() + : enableSpokenEmojis_; + } else { + return enableSpokenEmojisBuilder_.getMessage(); + } + } + /** + * + * + *
+     * The spoken emoji behavior for the call.
+     * If not set, uses the default behavior based on the model of choice.
+     * If 'true', adds spoken emoji formatting for the request. This will replace
+     * spoken emojis with the corresponding Unicode symbols in the final
+     * transcript. If 'false', spoken emojis are not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + */ + public Builder setEnableSpokenEmojis(com.google.protobuf.BoolValue value) { + if (enableSpokenEmojisBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + enableSpokenEmojis_ = value; + onChanged(); + } else { + enableSpokenEmojisBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * The spoken emoji behavior for the call.
+     * If not set, uses the default behavior based on the model of choice.
+     * If 'true', adds spoken emoji formatting for the request. This will replace
+     * spoken emojis with the corresponding Unicode symbols in the final
+     * transcript. If 'false', spoken emojis are not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + */ + public Builder setEnableSpokenEmojis(com.google.protobuf.BoolValue.Builder builderForValue) { + if (enableSpokenEmojisBuilder_ == null) { + enableSpokenEmojis_ = builderForValue.build(); + onChanged(); + } else { + enableSpokenEmojisBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * The spoken emoji behavior for the call.
+     * If not set, uses the default behavior based on the model of choice.
+     * If 'true', adds spoken emoji formatting for the request. This will replace
+     * spoken emojis with the corresponding Unicode symbols in the final
+     * transcript. If 'false', spoken emojis are not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + */ + public Builder mergeEnableSpokenEmojis(com.google.protobuf.BoolValue value) { + if (enableSpokenEmojisBuilder_ == null) { + if (enableSpokenEmojis_ != null) { + enableSpokenEmojis_ = + com.google.protobuf.BoolValue.newBuilder(enableSpokenEmojis_) + .mergeFrom(value) + .buildPartial(); + } else { + enableSpokenEmojis_ = value; + } + onChanged(); + } else { + enableSpokenEmojisBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * The spoken emoji behavior for the call.
+     * If not set, uses the default behavior based on the model of choice.
+     * If 'true', adds spoken emoji formatting for the request. This will replace
+     * spoken emojis with the corresponding Unicode symbols in the final
+     * transcript. If 'false', spoken emojis are not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + */ + public Builder clearEnableSpokenEmojis() { + if (enableSpokenEmojisBuilder_ == null) { + enableSpokenEmojis_ = null; + onChanged(); + } else { + enableSpokenEmojis_ = null; + enableSpokenEmojisBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * The spoken emoji behavior for the call.
+     * If not set, uses the default behavior based on the model of choice.
+     * If 'true', adds spoken emoji formatting for the request. This will replace
+     * spoken emojis with the corresponding Unicode symbols in the final
+     * transcript. If 'false', spoken emojis are not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + */ + public com.google.protobuf.BoolValue.Builder getEnableSpokenEmojisBuilder() { + + onChanged(); + return getEnableSpokenEmojisFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * The spoken emoji behavior for the call.
+     * If not set, uses the default behavior based on the model of choice.
+     * If 'true', adds spoken emoji formatting for the request. This will replace
+     * spoken emojis with the corresponding Unicode symbols in the final
+     * transcript. If 'false', spoken emojis are not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + */ + public com.google.protobuf.BoolValueOrBuilder getEnableSpokenEmojisOrBuilder() { + if (enableSpokenEmojisBuilder_ != null) { + return enableSpokenEmojisBuilder_.getMessageOrBuilder(); + } else { + return enableSpokenEmojis_ == null + ? com.google.protobuf.BoolValue.getDefaultInstance() + : enableSpokenEmojis_; + } + } + /** + * + * + *
+     * The spoken emoji behavior for the call.
+     * If not set, uses the default behavior based on the model of choice.
+     * If 'true', adds spoken emoji formatting for the request. This will replace
+     * spoken emojis with the corresponding Unicode symbols in the final
+     * transcript. If 'false', spoken emojis are not replaced.
+     * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.BoolValue, + com.google.protobuf.BoolValue.Builder, + com.google.protobuf.BoolValueOrBuilder> + getEnableSpokenEmojisFieldBuilder() { + if (enableSpokenEmojisBuilder_ == null) { + enableSpokenEmojisBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.BoolValue, + com.google.protobuf.BoolValue.Builder, + com.google.protobuf.BoolValueOrBuilder>( + getEnableSpokenEmojis(), getParentForChildren(), isClean()); + enableSpokenEmojis_ = null; + } + return enableSpokenEmojisBuilder_; + } + private boolean enableSpeakerDiarization_; /** * diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java index 08087a1d6..2502f4b4f 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java @@ -281,7 +281,7 @@ public interface RecognitionConfigOrBuilder * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -297,7 +297,7 @@ public interface RecognitionConfigOrBuilder * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -313,7 +313,7 @@ public interface RecognitionConfigOrBuilder * Speech adaptation configuration improves the accuracy of speech * recognition. When speech adaptation is set it supersedes the * `speech_contexts` field. For more information, see the [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) * documentation. * * @@ -329,7 +329,7 @@ public interface RecognitionConfigOrBuilder * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -343,7 +343,7 @@ public interface RecognitionConfigOrBuilder * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -357,7 +357,7 @@ public interface RecognitionConfigOrBuilder * A means to provide context to assist the speech recognition. 
For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -371,7 +371,7 @@ public interface RecognitionConfigOrBuilder * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -386,7 +386,7 @@ public interface RecognitionConfigOrBuilder * A means to provide context to assist the speech recognition. For more * information, see * [speech - * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). + * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). * * * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; @@ -440,6 +440,106 @@ public interface RecognitionConfigOrBuilder */ boolean getEnableAutomaticPunctuation(); + /** + * + * + *
+   * The spoken punctuation behavior for the call.
+   * If not set, uses the default behavior based on the model of choice;
+   * e.g. command_and_search will enable spoken punctuation by default.
+   * If 'true', replaces spoken punctuation with the corresponding symbols in
+   * the request. For example, "how are you question mark" becomes "how are
+   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+   * for support. If 'false', spoken punctuation is not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + * + * @return Whether the enableSpokenPunctuation field is set. + */ + boolean hasEnableSpokenPunctuation(); + /** + * + * + *
+   * The spoken punctuation behavior for the call.
+   * If not set, uses the default behavior based on the model of choice;
+   * e.g. command_and_search will enable spoken punctuation by default.
+   * If 'true', replaces spoken punctuation with the corresponding symbols in
+   * the request. For example, "how are you question mark" becomes "how are
+   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+   * for support. If 'false', spoken punctuation is not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + * + * @return The enableSpokenPunctuation. + */ + com.google.protobuf.BoolValue getEnableSpokenPunctuation(); + /** + * + * + *
+   * The spoken punctuation behavior for the call.
+   * If not set, uses the default behavior based on the model of choice;
+   * e.g. command_and_search will enable spoken punctuation by default.
+   * If 'true', replaces spoken punctuation with the corresponding symbols in
+   * the request. For example, "how are you question mark" becomes "how are
+   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+   * for support. If 'false', spoken punctuation is not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_punctuation = 22; + */ + com.google.protobuf.BoolValueOrBuilder getEnableSpokenPunctuationOrBuilder(); + + /** + * + * + *
+   * The spoken emoji behavior for the call.
+   * If not set, uses the default behavior based on the model of choice.
+   * If 'true', adds spoken emoji formatting for the request. This will replace
+   * spoken emojis with the corresponding Unicode symbols in the final
+   * transcript. If 'false', spoken emojis are not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + * + * @return Whether the enableSpokenEmojis field is set. + */ + boolean hasEnableSpokenEmojis(); + /** + * + * + *
+   * The spoken emoji behavior for the call.
+   * If not set, uses the default behavior based on the model of choice.
+   * If 'true', adds spoken emoji formatting for the request. This will replace
+   * spoken emojis with the corresponding Unicode symbols in the final
+   * transcript. If 'false', spoken emojis are not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + * + * @return The enableSpokenEmojis. + */ + com.google.protobuf.BoolValue getEnableSpokenEmojis(); + /** + * + * + *
+   * The spoken emoji behavior for the call.
+   * If not set, uses the default behavior based on the model of choice.
+   * If 'true', adds spoken emoji formatting for the request. This will replace
+   * spoken emojis with the corresponding Unicode symbols in the final
+   * transcript. If 'false', spoken emojis are not replaced.
+   * 
+ * + * .google.protobuf.BoolValue enable_spoken_emojis = 23; + */ + com.google.protobuf.BoolValueOrBuilder getEnableSpokenEmojisOrBuilder(); + /** * * diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java index 900dca2b3..41f71dbcd 100644 --- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java +++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java @@ -116,147 +116,150 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "/resource.proto\032#google/longrunning/oper" + "ations.proto\032\031google/protobuf/any.proto\032" + "\036google/protobuf/duration.proto\032\037google/" - + "protobuf/timestamp.proto\032\027google/rpc/sta" - + "tus.proto\"\236\001\n\020RecognizeRequest\022E\n\006config" - + "\030\001 \001(\01320.google.cloud.speech.v1p1beta1.R" - + "ecognitionConfigB\003\340A\002\022C\n\005audio\030\002 \001(\0132/.g" - + "oogle.cloud.speech.v1p1beta1.Recognition" - + "AudioB\003\340A\002\"\374\001\n\033LongRunningRecognizeReque" - + "st\022E\n\006config\030\001 \001(\01320.google.cloud.speech" - + ".v1p1beta1.RecognitionConfigB\003\340A\002\022C\n\005aud" - + "io\030\002 \001(\0132/.google.cloud.speech.v1p1beta1" - + ".RecognitionAudioB\003\340A\002\022Q\n\routput_config\030" - + "\004 \001(\01325.google.cloud.speech.v1p1beta1.Tr" - + "anscriptOutputConfigB\003\340A\001\":\n\026TranscriptO" - + "utputConfig\022\021\n\007gcs_uri\030\001 \001(\tH\000B\r\n\013output" - + "_type\"\240\001\n\031StreamingRecognizeRequest\022U\n\020s" - + "treaming_config\030\001 \001(\01329.google.cloud.spe" - + "ech.v1p1beta1.StreamingRecognitionConfig" - + "H\000\022\027\n\raudio_content\030\002 \001(\014H\000B\023\n\021streaming" - + "_request\"\226\001\n\032StreamingRecognitionConfig\022" - + "E\n\006config\030\001 \001(\01320.google.cloud.speech.v1" - + "p1beta1.RecognitionConfigB\003\340A\002\022\030\n\020single" - + "_utterance\030\002 \001(\010\022\027\n\017interim_results\030\003 \001(" - + "\010\"\334\007\n\021RecognitionConfig\022P\n\010encoding\030\001 \001(" - + "\0162>.google.cloud.speech.v1p1beta1.Recogn" - + "itionConfig.AudioEncoding\022\031\n\021sample_rate" - + "_hertz\030\002 \001(\005\022\033\n\023audio_channel_count\030\007 \001(" - + "\005\022/\n\'enable_separate_recognition_per_cha" - + "nnel\030\014 \001(\010\022\032\n\rlanguage_code\030\003 \001(\tB\003\340A\002\022\"" - + "\n\032alternative_language_codes\030\022 \003(\t\022\030\n\020ma" - + "x_alternatives\030\004 \001(\005\022\030\n\020profanity_filter" - + "\030\005 \001(\010\022C\n\nadaptation\030\024 \001(\0132/.google.clou" - + "d.speech.v1p1beta1.SpeechAdaptation\022E\n\017s" - + "peech_contexts\030\006 \003(\0132,.google.cloud.spee" - + "ch.v1p1beta1.SpeechContext\022 \n\030enable_wor" - + "d_time_offsets\030\010 \001(\010\022\036\n\026enable_word_conf" - + "idence\030\017 \001(\010\022$\n\034enable_automatic_punctua" - + "tion\030\013 \001(\010\022&\n\032enable_speaker_diarization" - + "\030\020 \001(\010B\002\030\001\022%\n\031diarization_speaker_count\030" - + "\021 \001(\005B\002\030\001\022S\n\022diarization_config\030\023 \001(\01327." 
- + "google.cloud.speech.v1p1beta1.SpeakerDia" - + "rizationConfig\022D\n\010metadata\030\t \001(\01322.googl" - + "e.cloud.speech.v1p1beta1.RecognitionMeta" - + "data\022\r\n\005model\030\r \001(\t\022\024\n\014use_enhanced\030\016 \001(" - + "\010\"\224\001\n\rAudioEncoding\022\030\n\024ENCODING_UNSPECIF" - + "IED\020\000\022\014\n\010LINEAR16\020\001\022\010\n\004FLAC\020\002\022\t\n\005MULAW\020\003" - + "\022\007\n\003AMR\020\004\022\n\n\006AMR_WB\020\005\022\014\n\010OGG_OPUS\020\006\022\032\n\026S" - + "PEEX_WITH_HEADER_BYTE\020\007\022\007\n\003MP3\020\010\"\220\001\n\030Spe" - + "akerDiarizationConfig\022\"\n\032enable_speaker_" - + "diarization\030\001 \001(\010\022\031\n\021min_speaker_count\030\002" - + " \001(\005\022\031\n\021max_speaker_count\030\003 \001(\005\022\032\n\013speak" - + "er_tag\030\005 \001(\005B\005\030\001\340A\003\"\327\010\n\023RecognitionMetad" - + "ata\022\\\n\020interaction_type\030\001 \001(\0162B.google.c" - + "loud.speech.v1p1beta1.RecognitionMetadat" - + "a.InteractionType\022$\n\034industry_naics_code" - + "_of_audio\030\003 \001(\r\022b\n\023microphone_distance\030\004" - + " \001(\0162E.google.cloud.speech.v1p1beta1.Rec" - + "ognitionMetadata.MicrophoneDistance\022a\n\023o" - + "riginal_media_type\030\005 \001(\0162D.google.cloud." - + "speech.v1p1beta1.RecognitionMetadata.Ori" - + "ginalMediaType\022e\n\025recording_device_type\030" - + "\006 \001(\0162F.google.cloud.speech.v1p1beta1.Re" - + "cognitionMetadata.RecordingDeviceType\022\035\n" - + "\025recording_device_name\030\007 \001(\t\022\032\n\022original" - + "_mime_type\030\010 \001(\t\022\031\n\robfuscated_id\030\t \001(\003B" - + "\002\030\001\022\023\n\013audio_topic\030\n \001(\t\"\305\001\n\017Interaction" - + "Type\022 \n\034INTERACTION_TYPE_UNSPECIFIED\020\000\022\016" - + "\n\nDISCUSSION\020\001\022\020\n\014PRESENTATION\020\002\022\016\n\nPHON" - + "E_CALL\020\003\022\r\n\tVOICEMAIL\020\004\022\033\n\027PROFESSIONALL" - + "Y_PRODUCED\020\005\022\020\n\014VOICE_SEARCH\020\006\022\021\n\rVOICE_" - + "COMMAND\020\007\022\r\n\tDICTATION\020\010\"d\n\022MicrophoneDi" - + "stance\022#\n\037MICROPHONE_DISTANCE_UNSPECIFIE" - + "D\020\000\022\r\n\tNEARFIELD\020\001\022\014\n\010MIDFIELD\020\002\022\014\n\010FARF" - + "IELD\020\003\"N\n\021OriginalMediaType\022#\n\037ORIGINAL_" - + "MEDIA_TYPE_UNSPECIFIED\020\000\022\t\n\005AUDIO\020\001\022\t\n\005V" - + "IDEO\020\002\"\244\001\n\023RecordingDeviceType\022%\n!RECORD" - + "ING_DEVICE_TYPE_UNSPECIFIED\020\000\022\016\n\nSMARTPH" - + "ONE\020\001\022\006\n\002PC\020\002\022\016\n\nPHONE_LINE\020\003\022\013\n\007VEHICLE" - + "\020\004\022\030\n\024OTHER_OUTDOOR_DEVICE\020\005\022\027\n\023OTHER_IN" - + "DOOR_DEVICE\020\006\"/\n\rSpeechContext\022\017\n\007phrase" - + "s\030\001 \003(\t\022\r\n\005boost\030\004 \001(\002\"D\n\020RecognitionAud" - + "io\022\021\n\007content\030\001 \001(\014H\000\022\r\n\003uri\030\002 \001(\tH\000B\016\n\014" - + "audio_source\"\\\n\021RecognizeResponse\022G\n\007res" - + "ults\030\002 \003(\01326.google.cloud.speech.v1p1bet" - + "a1.SpeechRecognitionResult\"\337\001\n\034LongRunni" - + "ngRecognizeResponse\022G\n\007results\030\002 \003(\01326.g" - + "oogle.cloud.speech.v1p1beta1.SpeechRecog" - + "nitionResult\022L\n\routput_config\030\006 \001(\01325.go" - + "ogle.cloud.speech.v1p1beta1.TranscriptOu" - + "tputConfig\022(\n\014output_error\030\007 \001(\0132\022.googl" - + "e.rpc.Status\"\203\002\n\034LongRunningRecognizeMet" - + "adata\022\030\n\020progress_percent\030\001 \001(\005\022.\n\nstart" - + 
"_time\030\002 \001(\0132\032.google.protobuf.Timestamp\022" - + "4\n\020last_update_time\030\003 \001(\0132\032.google.proto" - + "buf.Timestamp\022\020\n\003uri\030\004 \001(\tB\003\340A\003\022Q\n\routpu" - + "t_config\030\005 \001(\01325.google.cloud.speech.v1p" - + "1beta1.TranscriptOutputConfigB\003\340A\003\"\277\002\n\032S" - + "treamingRecognizeResponse\022!\n\005error\030\001 \001(\013" - + "2\022.google.rpc.Status\022J\n\007results\030\002 \003(\01329." - + "google.cloud.speech.v1p1beta1.StreamingR" - + "ecognitionResult\022d\n\021speech_event_type\030\004 " - + "\001(\0162I.google.cloud.speech.v1p1beta1.Stre" - + "amingRecognizeResponse.SpeechEventType\"L" - + "\n\017SpeechEventType\022\034\n\030SPEECH_EVENT_UNSPEC" - + "IFIED\020\000\022\033\n\027END_OF_SINGLE_UTTERANCE\020\001\"\371\001\n" - + "\032StreamingRecognitionResult\022Q\n\014alternati" - + "ves\030\001 \003(\0132;.google.cloud.speech.v1p1beta" - + "1.SpeechRecognitionAlternative\022\020\n\010is_fin" - + "al\030\002 \001(\010\022\021\n\tstability\030\003 \001(\002\0222\n\017result_en" - + "d_time\030\004 \001(\0132\031.google.protobuf.Duration\022" - + "\023\n\013channel_tag\030\005 \001(\005\022\032\n\rlanguage_code\030\006 " - + "\001(\tB\003\340A\003\"\235\001\n\027SpeechRecognitionResult\022Q\n\014" - + "alternatives\030\001 \003(\0132;.google.cloud.speech" - + ".v1p1beta1.SpeechRecognitionAlternative\022" - + "\023\n\013channel_tag\030\002 \001(\005\022\032\n\rlanguage_code\030\005 " - + "\001(\tB\003\340A\003\"~\n\034SpeechRecognitionAlternative" - + "\022\022\n\ntranscript\030\001 \001(\t\022\022\n\nconfidence\030\002 \001(\002" - + "\0226\n\005words\030\003 \003(\0132\'.google.cloud.speech.v1" - + "p1beta1.WordInfo\"\242\001\n\010WordInfo\022-\n\nstart_t" - + "ime\030\001 \001(\0132\031.google.protobuf.Duration\022+\n\010" - + "end_time\030\002 \001(\0132\031.google.protobuf.Duratio" - + "n\022\014\n\004word\030\003 \001(\t\022\022\n\nconfidence\030\004 \001(\002\022\030\n\013s" - + "peaker_tag\030\005 \001(\005B\003\340A\0032\202\005\n\006Speech\022\245\001\n\tRec" - + "ognize\022/.google.cloud.speech.v1p1beta1.R" - + "ecognizeRequest\0320.google.cloud.speech.v1" - + "p1beta1.RecognizeResponse\"5\202\323\344\223\002 \"\033/v1p1" - + "beta1/speech:recognize:\001*\332A\014config,audio" - + "\022\362\001\n\024LongRunningRecognize\022:.google.cloud" - + ".speech.v1p1beta1.LongRunningRecognizeRe" - + "quest\032\035.google.longrunning.Operation\"\177\202\323" - + "\344\223\002+\"&/v1p1beta1/speech:longrunningrecog" - + "nize:\001*\332A\014config,audio\312A<\n\034LongRunningRe" - + "cognizeResponse\022\034LongRunningRecognizeMet" - + "adata\022\217\001\n\022StreamingRecognize\0228.google.cl" - + "oud.speech.v1p1beta1.StreamingRecognizeR" - + "equest\0329.google.cloud.speech.v1p1beta1.S" - + "treamingRecognizeResponse\"\000(\0010\001\032I\312A\025spee" - + "ch.googleapis.com\322A.https://www.googleap" - + "is.com/auth/cloud-platformB\200\001\n!com.googl" - + "e.cloud.speech.v1p1beta1B\013SpeechProtoP\001Z" - + "Cgoogle.golang.org/genproto/googleapis/c" - + "loud/speech/v1p1beta1;speech\370\001\001\242\002\003GCSb\006p" - + "roto3" + + "protobuf/timestamp.proto\032\036google/protobu" + + "f/wrappers.proto\032\027google/rpc/status.prot" + + "o\"\236\001\n\020RecognizeRequest\022E\n\006config\030\001 \001(\01320" + + ".google.cloud.speech.v1p1beta1.Recogniti" + + "onConfigB\003\340A\002\022C\n\005audio\030\002 \001(\0132/.google.cl" + + "oud.speech.v1p1beta1.RecognitionAudioB\003\340" + + 
"A\002\"\374\001\n\033LongRunningRecognizeRequest\022E\n\006co" + + "nfig\030\001 \001(\01320.google.cloud.speech.v1p1bet" + + "a1.RecognitionConfigB\003\340A\002\022C\n\005audio\030\002 \001(\013" + + "2/.google.cloud.speech.v1p1beta1.Recogni" + + "tionAudioB\003\340A\002\022Q\n\routput_config\030\004 \001(\01325." + + "google.cloud.speech.v1p1beta1.Transcript" + + "OutputConfigB\003\340A\001\":\n\026TranscriptOutputCon" + + "fig\022\021\n\007gcs_uri\030\001 \001(\tH\000B\r\n\013output_type\"\240\001" + + "\n\031StreamingRecognizeRequest\022U\n\020streaming" + + "_config\030\001 \001(\01329.google.cloud.speech.v1p1" + + "beta1.StreamingRecognitionConfigH\000\022\027\n\rau" + + "dio_content\030\002 \001(\014H\000B\023\n\021streaming_request" + + "\"\226\001\n\032StreamingRecognitionConfig\022E\n\006confi" + + "g\030\001 \001(\01320.google.cloud.speech.v1p1beta1." + + "RecognitionConfigB\003\340A\002\022\030\n\020single_utteran" + + "ce\030\002 \001(\010\022\027\n\017interim_results\030\003 \001(\010\"\325\010\n\021Re" + + "cognitionConfig\022P\n\010encoding\030\001 \001(\0162>.goog" + + "le.cloud.speech.v1p1beta1.RecognitionCon" + + "fig.AudioEncoding\022\031\n\021sample_rate_hertz\030\002" + + " \001(\005\022\033\n\023audio_channel_count\030\007 \001(\005\022/\n\'ena" + + "ble_separate_recognition_per_channel\030\014 \001" + + "(\010\022\032\n\rlanguage_code\030\003 \001(\tB\003\340A\002\022\"\n\032altern" + + "ative_language_codes\030\022 \003(\t\022\030\n\020max_altern" + + "atives\030\004 \001(\005\022\030\n\020profanity_filter\030\005 \001(\010\022C" + + "\n\nadaptation\030\024 \001(\0132/.google.cloud.speech" + + ".v1p1beta1.SpeechAdaptation\022E\n\017speech_co" + + "ntexts\030\006 \003(\0132,.google.cloud.speech.v1p1b" + + "eta1.SpeechContext\022 \n\030enable_word_time_o" + + "ffsets\030\010 \001(\010\022\036\n\026enable_word_confidence\030\017" + + " \001(\010\022$\n\034enable_automatic_punctuation\030\013 \001" + + "(\010\022=\n\031enable_spoken_punctuation\030\026 \001(\0132\032." + + "google.protobuf.BoolValue\0228\n\024enable_spok" + + "en_emojis\030\027 \001(\0132\032.google.protobuf.BoolVa" + + "lue\022&\n\032enable_speaker_diarization\030\020 \001(\010B" + + "\002\030\001\022%\n\031diarization_speaker_count\030\021 \001(\005B\002" + + "\030\001\022S\n\022diarization_config\030\023 \001(\01327.google." + + "cloud.speech.v1p1beta1.SpeakerDiarizatio" + + "nConfig\022D\n\010metadata\030\t \001(\01322.google.cloud" + + ".speech.v1p1beta1.RecognitionMetadata\022\r\n" + + "\005model\030\r \001(\t\022\024\n\014use_enhanced\030\016 \001(\010\"\224\001\n\rA" + + "udioEncoding\022\030\n\024ENCODING_UNSPECIFIED\020\000\022\014" + + "\n\010LINEAR16\020\001\022\010\n\004FLAC\020\002\022\t\n\005MULAW\020\003\022\007\n\003AMR" + + "\020\004\022\n\n\006AMR_WB\020\005\022\014\n\010OGG_OPUS\020\006\022\032\n\026SPEEX_WI" + + "TH_HEADER_BYTE\020\007\022\007\n\003MP3\020\010\"\220\001\n\030SpeakerDia" + + "rizationConfig\022\"\n\032enable_speaker_diariza" + + "tion\030\001 \001(\010\022\031\n\021min_speaker_count\030\002 \001(\005\022\031\n" + + "\021max_speaker_count\030\003 \001(\005\022\032\n\013speaker_tag\030" + + "\005 \001(\005B\005\030\001\340A\003\"\327\010\n\023RecognitionMetadata\022\\\n\020" + + "interaction_type\030\001 \001(\0162B.google.cloud.sp" + + "eech.v1p1beta1.RecognitionMetadata.Inter" + + "actionType\022$\n\034industry_naics_code_of_aud" + + "io\030\003 \001(\r\022b\n\023microphone_distance\030\004 \001(\0162E." 
+ + "google.cloud.speech.v1p1beta1.Recognitio" + + "nMetadata.MicrophoneDistance\022a\n\023original" + + "_media_type\030\005 \001(\0162D.google.cloud.speech." + + "v1p1beta1.RecognitionMetadata.OriginalMe" + + "diaType\022e\n\025recording_device_type\030\006 \001(\0162F" + + ".google.cloud.speech.v1p1beta1.Recogniti" + + "onMetadata.RecordingDeviceType\022\035\n\025record" + + "ing_device_name\030\007 \001(\t\022\032\n\022original_mime_t" + + "ype\030\010 \001(\t\022\031\n\robfuscated_id\030\t \001(\003B\002\030\001\022\023\n\013" + + "audio_topic\030\n \001(\t\"\305\001\n\017InteractionType\022 \n" + + "\034INTERACTION_TYPE_UNSPECIFIED\020\000\022\016\n\nDISCU" + + "SSION\020\001\022\020\n\014PRESENTATION\020\002\022\016\n\nPHONE_CALL\020" + + "\003\022\r\n\tVOICEMAIL\020\004\022\033\n\027PROFESSIONALLY_PRODU" + + "CED\020\005\022\020\n\014VOICE_SEARCH\020\006\022\021\n\rVOICE_COMMAND" + + "\020\007\022\r\n\tDICTATION\020\010\"d\n\022MicrophoneDistance\022" + + "#\n\037MICROPHONE_DISTANCE_UNSPECIFIED\020\000\022\r\n\t" + + "NEARFIELD\020\001\022\014\n\010MIDFIELD\020\002\022\014\n\010FARFIELD\020\003\"" + + "N\n\021OriginalMediaType\022#\n\037ORIGINAL_MEDIA_T" + + "YPE_UNSPECIFIED\020\000\022\t\n\005AUDIO\020\001\022\t\n\005VIDEO\020\002\"" + + "\244\001\n\023RecordingDeviceType\022%\n!RECORDING_DEV" + + "ICE_TYPE_UNSPECIFIED\020\000\022\016\n\nSMARTPHONE\020\001\022\006" + + "\n\002PC\020\002\022\016\n\nPHONE_LINE\020\003\022\013\n\007VEHICLE\020\004\022\030\n\024O" + + "THER_OUTDOOR_DEVICE\020\005\022\027\n\023OTHER_INDOOR_DE" + + "VICE\020\006\"/\n\rSpeechContext\022\017\n\007phrases\030\001 \003(\t" + + "\022\r\n\005boost\030\004 \001(\002\"D\n\020RecognitionAudio\022\021\n\007c" + + "ontent\030\001 \001(\014H\000\022\r\n\003uri\030\002 \001(\tH\000B\016\n\014audio_s" + + "ource\"\\\n\021RecognizeResponse\022G\n\007results\030\002 " + + "\003(\01326.google.cloud.speech.v1p1beta1.Spee" + + "chRecognitionResult\"\337\001\n\034LongRunningRecog" + + "nizeResponse\022G\n\007results\030\002 \003(\01326.google.c" + + "loud.speech.v1p1beta1.SpeechRecognitionR" + + "esult\022L\n\routput_config\030\006 \001(\01325.google.cl" + + "oud.speech.v1p1beta1.TranscriptOutputCon" + + "fig\022(\n\014output_error\030\007 \001(\0132\022.google.rpc.S" + + "tatus\"\203\002\n\034LongRunningRecognizeMetadata\022\030" + + "\n\020progress_percent\030\001 \001(\005\022.\n\nstart_time\030\002" + + " \001(\0132\032.google.protobuf.Timestamp\0224\n\020last" + + "_update_time\030\003 \001(\0132\032.google.protobuf.Tim" + + "estamp\022\020\n\003uri\030\004 \001(\tB\003\340A\003\022Q\n\routput_confi" + + "g\030\005 \001(\01325.google.cloud.speech.v1p1beta1." + + "TranscriptOutputConfigB\003\340A\003\"\277\002\n\032Streamin" + + "gRecognizeResponse\022!\n\005error\030\001 \001(\0132\022.goog" + + "le.rpc.Status\022J\n\007results\030\002 \003(\01329.google." 
+ + "cloud.speech.v1p1beta1.StreamingRecognit" + + "ionResult\022d\n\021speech_event_type\030\004 \001(\0162I.g" + + "oogle.cloud.speech.v1p1beta1.StreamingRe" + + "cognizeResponse.SpeechEventType\"L\n\017Speec" + + "hEventType\022\034\n\030SPEECH_EVENT_UNSPECIFIED\020\000" + + "\022\033\n\027END_OF_SINGLE_UTTERANCE\020\001\"\371\001\n\032Stream" + + "ingRecognitionResult\022Q\n\014alternatives\030\001 \003" + + "(\0132;.google.cloud.speech.v1p1beta1.Speec" + + "hRecognitionAlternative\022\020\n\010is_final\030\002 \001(" + + "\010\022\021\n\tstability\030\003 \001(\002\0222\n\017result_end_time\030" + + "\004 \001(\0132\031.google.protobuf.Duration\022\023\n\013chan" + + "nel_tag\030\005 \001(\005\022\032\n\rlanguage_code\030\006 \001(\tB\003\340A" + + "\003\"\235\001\n\027SpeechRecognitionResult\022Q\n\014alterna" + + "tives\030\001 \003(\0132;.google.cloud.speech.v1p1be" + + "ta1.SpeechRecognitionAlternative\022\023\n\013chan" + + "nel_tag\030\002 \001(\005\022\032\n\rlanguage_code\030\005 \001(\tB\003\340A" + + "\003\"~\n\034SpeechRecognitionAlternative\022\022\n\ntra" + + "nscript\030\001 \001(\t\022\022\n\nconfidence\030\002 \001(\002\0226\n\005wor" + + "ds\030\003 \003(\0132\'.google.cloud.speech.v1p1beta1" + + ".WordInfo\"\242\001\n\010WordInfo\022-\n\nstart_time\030\001 \001" + + "(\0132\031.google.protobuf.Duration\022+\n\010end_tim" + + "e\030\002 \001(\0132\031.google.protobuf.Duration\022\014\n\004wo" + + "rd\030\003 \001(\t\022\022\n\nconfidence\030\004 \001(\002\022\030\n\013speaker_" + + "tag\030\005 \001(\005B\003\340A\0032\202\005\n\006Speech\022\245\001\n\tRecognize\022" + + "/.google.cloud.speech.v1p1beta1.Recogniz" + + "eRequest\0320.google.cloud.speech.v1p1beta1" + + ".RecognizeResponse\"5\202\323\344\223\002 \"\033/v1p1beta1/s" + + "peech:recognize:\001*\332A\014config,audio\022\362\001\n\024Lo" + + "ngRunningRecognize\022:.google.cloud.speech" + + ".v1p1beta1.LongRunningRecognizeRequest\032\035" + + ".google.longrunning.Operation\"\177\202\323\344\223\002+\"&/" + + "v1p1beta1/speech:longrunningrecognize:\001*" + + "\332A\014config,audio\312A<\n\034LongRunningRecognize" + + "Response\022\034LongRunningRecognizeMetadata\022\217" + + "\001\n\022StreamingRecognize\0228.google.cloud.spe" + + "ech.v1p1beta1.StreamingRecognizeRequest\032" + + "9.google.cloud.speech.v1p1beta1.Streamin" + + "gRecognizeResponse\"\000(\0010\001\032I\312A\025speech.goog" + + "leapis.com\322A.https://www.googleapis.com/" + + "auth/cloud-platformB\200\001\n!com.google.cloud" + + ".speech.v1p1beta1B\013SpeechProtoP\001ZCgoogle" + + ".golang.org/genproto/googleapis/cloud/sp" + + "eech/v1p1beta1;speech\370\001\001\242\002\003GCSb\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -270,6 +273,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.protobuf.AnyProto.getDescriptor(), com.google.protobuf.DurationProto.getDescriptor(), com.google.protobuf.TimestampProto.getDescriptor(), + com.google.protobuf.WrappersProto.getDescriptor(), com.google.rpc.StatusProto.getDescriptor(), }); internal_static_google_cloud_speech_v1p1beta1_RecognizeRequest_descriptor = @@ -331,6 +335,8 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "EnableWordTimeOffsets", "EnableWordConfidence", "EnableAutomaticPunctuation", + "EnableSpokenPunctuation", + "EnableSpokenEmojis", "EnableSpeakerDiarization", "DiarizationSpeakerCount", "DiarizationConfig", @@ -460,6 +466,7 @@ public static 
com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
    com.google.protobuf.AnyProto.getDescriptor();
    com.google.protobuf.DurationProto.getDescriptor();
    com.google.protobuf.TimestampProto.getDescriptor();
+    com.google.protobuf.WrappersProto.getDescriptor();
    com.google.rpc.StatusProto.getDescriptor();
  }
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponse.java b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponse.java
index 247307470..b025a4520 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponse.java
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/StreamingRecognizeResponse.java
@@ -27,8 +27,8 @@
 * messages are streamed back to the client. If there is no recognizable
 * audio, and `single_utterance` is set to false, then no messages are streamed
 * back to the client.
- * Here's an example of a series of ten `StreamingRecognizeResponse`s that might
- * be returned while processing audio:
+ * Here's an example of a series of `StreamingRecognizeResponse`s that might be
+ * returned while processing audio:
 * 1. results { alternatives { transcript: "tube" } stability: 0.01 }
 * 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
 * 3. results { alternatives { transcript: "to be" } stability: 0.9 }
@@ -710,8 +710,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
 * messages are streamed back to the client. If there is no recognizable
 * audio, and `single_utterance` is set to false, then no messages are streamed
 * back to the client.
- * Here's an example of a series of ten `StreamingRecognizeResponse`s that might
- * be returned while processing audio:
+ * Here's an example of a series of `StreamingRecognizeResponse`s that might be
+ * returned while processing audio:
 * 1. results { alternatives { transcript: "tube" } stability: 0.01 }
 * 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
 * 3. results { alternatives { transcript: "to be" } stability: 0.9 }
diff --git a/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto b/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto
index 9a8e256f0..cf183d012 100644
--- a/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto
+++ b/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto
@@ -1,3 +1,4 @@
+
 // Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -24,6 +25,7 @@ import "google/longrunning/operations.proto";
 import "google/protobuf/any.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
 import "google/rpc/status.proto";

 option cc_enable_arenas = true;
@@ -316,7 +318,7 @@ message RecognitionConfig {
   // Speech adaptation configuration improves the accuracy of speech
   // recognition. When speech adaptation is set it supersedes the
   // `speech_contexts` field. For more information, see the [speech
-  // adaptation](https://cloud.google.com/speech-to-text/docs/context-strength)
+  // adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
   // documentation.
  SpeechAdaptation adaptation = 20;

@@ -324,7 +326,7 @@ message RecognitionConfig {
   // A means to provide context to assist the speech recognition. For more
   // information, see
   // [speech
-  // adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
+  // adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   repeated SpeechContext speech_contexts = 6;

   // If `true`, the top result includes a list of words and
@@ -344,6 +346,22 @@ message RecognitionConfig {
   // The default 'false' value does not add punctuation to result hypotheses.
   bool enable_automatic_punctuation = 11;

+  // The spoken punctuation behavior for the call
+  // If not set, uses default behavior based on model of choice
+  // e.g. command_and_search will enable spoken punctuation by default
+  // If 'true', replaces spoken punctuation with the corresponding symbols in
+  // the request. For example, "how are you question mark" becomes "how are
+  // you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
+  // for support. If 'false', spoken punctuation is not replaced.
+  google.protobuf.BoolValue enable_spoken_punctuation = 22;
+
+  // The spoken emoji behavior for the call
+  // If not set, uses default behavior based on model of choice
+  // If 'true', adds spoken emoji formatting for the request. This will replace
+  // spoken emojis with the corresponding Unicode symbols in the final
+  // transcript. If 'false', spoken emojis are not replaced.
+  google.protobuf.BoolValue enable_spoken_emojis = 23;
+
   // If 'true', enables speaker detection for each recognized word in
   // the top alternative of the recognition result using a speaker_tag provided
   // in the WordInfo.
@@ -674,8 +692,8 @@ message LongRunningRecognizeMetadata {
 // audio, and `single_utterance` is set to false, then no messages are streamed
 // back to the client.
 //
-// Here's an example of a series of ten `StreamingRecognizeResponse`s that might
-// be returned while processing audio:
+// Here's an example of a series of `StreamingRecognizeResponse`s that might be
+// returned while processing audio:
 //
 // 1. results { alternatives { transcript: "tube" } stability: 0.01 }
 //
diff --git a/synth.metadata b/synth.metadata
index 636bacefa..e8481473a 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -4,23 +4,23 @@
     "git": {
       "name": ".",
       "remote": "https://github.com/googleapis/java-speech.git",
-      "sha": "d4a5dce3cb427f917d99a98ce9fcc9dcaad1bbab"
+      "sha": "3ec1e9c20ac27a4ef3e16c3f03f85039a8a68374"
     }
   },
   {
     "git": {
       "name": "googleapis",
       "remote": "https://github.com/googleapis/googleapis.git",
-      "sha": "72326861be446be27d53af95c87e6e313367c371",
-      "internalRef": "362934100"
+      "sha": "93b078ae0decd51e618041bb337a8d592d0c998b",
+      "internalRef": "367239272"
     }
   },
   {
     "git": {
       "name": "googleapis",
       "remote": "https://github.com/googleapis/googleapis.git",
-      "sha": "72326861be446be27d53af95c87e6e313367c371",
-      "internalRef": "362934100"
+      "sha": "93b078ae0decd51e618041bb337a8d592d0c998b",
+      "internalRef": "367239272"
    }
  },
  {
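For context on the two new `RecognitionConfig` fields added above, the sketch below shows roughly how they could be set through the generated v1p1beta1 Java surface once this change ships. It is a minimal illustration, not part of the patch: the class name, encoding, sample rate, language code, and GCS URI are assumed placeholder values, and only `setEnableSpokenPunctuation` and `setEnableSpokenEmojis` correspond to the fields introduced here.

    import com.google.cloud.speech.v1p1beta1.RecognitionAudio;
    import com.google.cloud.speech.v1p1beta1.RecognitionConfig;
    import com.google.cloud.speech.v1p1beta1.RecognizeResponse;
    import com.google.cloud.speech.v1p1beta1.SpeechClient;
    import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
    import com.google.protobuf.BoolValue;

    public class SpokenPunctuationExample {

      public static void main(String[] args) throws Exception {
        // BoolValue wrappers distinguish "explicitly set" from "unset"; leaving the
        // fields unset keeps the model-dependent default described in cloud_speech.proto.
        RecognitionConfig config =
            RecognitionConfig.newBuilder()
                .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16) // illustrative encoding
                .setSampleRateHertz(16000)                             // illustrative sample rate
                .setLanguageCode("en-US")                              // illustrative language
                .setEnableSpokenPunctuation(BoolValue.newBuilder().setValue(true).build())
                .setEnableSpokenEmojis(BoolValue.newBuilder().setValue(true).build())
                .build();

        // Hypothetical audio location; any RecognitionAudio source works the same way.
        RecognitionAudio audio =
            RecognitionAudio.newBuilder().setUri("gs://my-bucket/dictation.wav").build();

        try (SpeechClient client = SpeechClient.create()) {
          RecognizeResponse response = client.recognize(config, audio);
          for (SpeechRecognitionResult result : response.getResultsList()) {
            if (result.getAlternativesCount() > 0) {
              // With spoken punctuation enabled, "how are you question mark" comes back
              // as "how are you?" per the field documentation above.
              System.out.println(result.getAlternatives(0).getTranscript());
            }
          }
        }
      }
    }

Using `google.protobuf.BoolValue` rather than a plain `bool` lets the API tell an explicit `false` apart from an unset field, which is why the spoken punctuation and spoken emoji defaults can remain model-dependent when the fields are left unset.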