diff --git a/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/package-info.java b/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/package-info.java index 0da865c4..b9d57637 100644 --- a/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/package-info.java +++ b/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/package-info.java @@ -15,7 +15,9 @@ */ /** - * The interfaces provided are listed below, along with usage samples. + * A client to Cloud Text-to-Speech API + * + *

The interfaces provided are listed below, along with usage samples. * *

======================= TextToSpeechClient ======================= * diff --git a/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechClient.java b/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechClient.java index 2a6e729e..20e524fc 100644 --- a/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechClient.java +++ b/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechClient.java @@ -154,11 +154,11 @@ public TextToSpeechStub getStub() { * @param languageCode Optional. Recommended. * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If not specified, the * API will return all supported voices. If specified, the ListVoices call will only return - * voices that can be used to synthesize this language_code. E.g. when specifying "en-NZ", you - * will get supported "en-NZ" voices; when specifying "no", you will get supported - * "no-\\*" (Norwegian) and "nb-\\*" (Norwegian Bokmal) voices; specifying "zh" will - * also get supported "cmn-\\*" voices; specifying "zh-hk" will also get supported - * "yue-hk" voices. + * voices that can be used to synthesize this language_code. E.g. when specifying `"en-NZ"`, + * you will get supported `"en-NZ"` voices; when specifying `"no"`, you will get supported + * `"no-\\*"` (Norwegian) and `"nb-\\*"` (Norwegian Bokmal) voices; specifying `"zh"` + * will also get supported `"cmn-\\*"` voices; specifying `"zh-hk"` will also get + * supported `"yue-hk"` voices. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final ListVoicesResponse listVoices(String languageCode) { diff --git a/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1beta1/package-info.java b/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1beta1/package-info.java index 9e8914a5..44758f38 100644 --- a/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1beta1/package-info.java +++ b/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1beta1/package-info.java @@ -15,7 +15,9 @@ */ /** - * The interfaces provided are listed below, along with usage samples. + * A client to Cloud Text-to-Speech API + * + *

The interfaces provided are listed below, along with usage samples. * *

======================= TextToSpeechClient ======================= * diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/CustomVoiceParams.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/CustomVoiceParams.java new file mode 100644 index 00000000..cf3399d4 --- /dev/null +++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/CustomVoiceParams.java @@ -0,0 +1,996 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1beta1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1beta1; + +/** + * + * + *

+ * Description of the custom voice to be synthesized.
+ * 
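+ * A minimal construction sketch (illustrative only; the AutoML model resource
+ * name below is a hypothetical placeholder):
+ *
+ *   CustomVoiceParams customVoice =
+ *       CustomVoiceParams.newBuilder()
+ *           .setModel("projects/my-project/locations/us-central1/models/my-model")
+ *           .setReportedUsage(CustomVoiceParams.ReportedUsage.REALTIME)
+ *           .build();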
+ * + * Protobuf type {@code google.cloud.texttospeech.v1beta1.CustomVoiceParams} + */ +public final class CustomVoiceParams extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1beta1.CustomVoiceParams) + CustomVoiceParamsOrBuilder { + private static final long serialVersionUID = 0L; + // Use CustomVoiceParams.newBuilder() to construct. + private CustomVoiceParams(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CustomVoiceParams() { + model_ = ""; + reportedUsage_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CustomVoiceParams(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private CustomVoiceParams( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + model_ = s; + break; + } + case 24: + { + int rawValue = input.readEnum(); + + reportedUsage_ = rawValue; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto + .internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto + .internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.class, + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder.class); + } + + /** + * + * + *
+   * The usage of the synthesized audio. You must report your honest and
+   * correct usage of the service, as it is regulated by contract and has a
+   * significant effect on billing.
+   * 
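+   * As a rule of thumb (see the per-value documentation below), REALTIME is for
+   * audio that is played once and not retained, such as an IVR response, while
+   * OFFLINE is for audio that is stored and replayed.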
+ * + * Protobuf enum {@code google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage} + */ + public enum ReportedUsage implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * A request with an unspecified reported usage will be rejected.
+     * 
+ * + * REPORTED_USAGE_UNSPECIFIED = 0; + */ + REPORTED_USAGE_UNSPECIFIED(0), + /** + * + * + *
+     * For scenarios where the synthesized audio is not downloadable and can
+     * only be used once. For example, a real-time request in an IVR system.
+     * 
+ * + * REALTIME = 1; + */ + REALTIME(1), + /** + * + * + *
+     * For scenarios where the synthesized audio is downloadable and can be
+     * reused. For example, the synthesized audio is downloaded, stored in a
+     * customer service system, and played repeatedly.
+     * 
+ * + * OFFLINE = 2; + */ + OFFLINE(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * A request with an unspecified reported usage will be rejected.
+     * 
+ * + * REPORTED_USAGE_UNSPECIFIED = 0; + */ + public static final int REPORTED_USAGE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * For scenarios where the synthesized audio is not downloadable and can
+     * only be used once. For example, a real-time request in an IVR system.
+     * 
+ * + * REALTIME = 1; + */ + public static final int REALTIME_VALUE = 1; + /** + * + * + *
+     * For scenarios where the synthesized audio is downloadable and can be
+     * reused. For example, the synthesized audio is downloaded, stored in a
+     * customer service system, and played repeatedly.
+     * 
+ * + * OFFLINE = 2; + */ + public static final int OFFLINE_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ReportedUsage valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ReportedUsage forNumber(int value) { + switch (value) { + case 0: + return REPORTED_USAGE_UNSPECIFIED; + case 1: + return REALTIME; + case 2: + return OFFLINE; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ReportedUsage findValueByNumber(int number) { + return ReportedUsage.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final ReportedUsage[] VALUES = values(); + + public static ReportedUsage valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ReportedUsage(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage) + } + + public static final int MODEL_FIELD_NUMBER = 1; + private volatile java.lang.Object model_; + /** + * + * + *
+   * Required. The name of the AutoML model that synthesizes the custom voice.
+   * 
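+   * Per the resource reference declared in this proto
+   * (`automl.googleapis.com/Model`), the expected resource name format is
+   * `projects/{project}/locations/{location}/models/{model}`.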
+ * + * + * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The model. + */ + @java.lang.Override + public java.lang.String getModel() { + java.lang.Object ref = model_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + model_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The name of the AutoML model that synthesizes the custom voice.
+   * 
+ * + * + * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for model. + */ + @java.lang.Override + public com.google.protobuf.ByteString getModelBytes() { + java.lang.Object ref = model_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + model_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REPORTED_USAGE_FIELD_NUMBER = 3; + private int reportedUsage_; + /** + * + * + *
+   * Optional. The usage of the synthesized audio to be reported.
+   * 
+ * + * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for reportedUsage. + */ + @java.lang.Override + public int getReportedUsageValue() { + return reportedUsage_; + } + /** + * + * + *
+   * Optional. The usage of the synthesized audio to be reported.
+   * 
+ * + * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The reportedUsage. + */ + @java.lang.Override + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage getReportedUsage() { + @SuppressWarnings("deprecation") + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage result = + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage.valueOf( + reportedUsage_); + return result == null + ? com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, model_); + } + if (reportedUsage_ + != com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage + .REPORTED_USAGE_UNSPECIFIED + .getNumber()) { + output.writeEnum(3, reportedUsage_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, model_); + } + if (reportedUsage_ + != com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage + .REPORTED_USAGE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, reportedUsage_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.texttospeech.v1beta1.CustomVoiceParams)) { + return super.equals(obj); + } + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams other = + (com.google.cloud.texttospeech.v1beta1.CustomVoiceParams) obj; + + if (!getModel().equals(other.getModel())) return false; + if (reportedUsage_ != other.reportedUsage_) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MODEL_FIELD_NUMBER; + hash = (53 * hash) + getModel().hashCode(); + hash = (37 * hash) + REPORTED_USAGE_FIELD_NUMBER; + hash = (53 * hash) + reportedUsage_; + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + 
} + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Description of the custom voice to be synthesized.
+   * 
+ * + * Protobuf type {@code google.cloud.texttospeech.v1beta1.CustomVoiceParams} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1beta1.CustomVoiceParams) + com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto + .internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto + .internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.class, + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder.class); + } + + // Construct using com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + model_ = ""; + + reportedUsage_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto + .internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_descriptor; + } + + @java.lang.Override + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams getDefaultInstanceForType() { + return com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams build() { + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams buildPartial() { + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams result = + new com.google.cloud.texttospeech.v1beta1.CustomVoiceParams(this); + result.model_ = model_; + result.reportedUsage_ = reportedUsage_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.texttospeech.v1beta1.CustomVoiceParams) { + return mergeFrom((com.google.cloud.texttospeech.v1beta1.CustomVoiceParams) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.texttospeech.v1beta1.CustomVoiceParams other) { + if (other == com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.getDefaultInstance()) + return this; + if (!other.getModel().isEmpty()) { + model_ = other.model_; + onChanged(); + } + if (other.reportedUsage_ != 0) { + setReportedUsageValue(other.getReportedUsageValue()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.texttospeech.v1beta1.CustomVoiceParams) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object model_ = ""; + /** + * + * + *
+     * Required. The name of the AutoML model that synthesizes the custom voice.
+     * 
+ * + * + * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The model. + */ + public java.lang.String getModel() { + java.lang.Object ref = model_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + model_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The name of the AutoML model that synthesizes the custom voice.
+     * 
+ * + * + * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for model. + */ + public com.google.protobuf.ByteString getModelBytes() { + java.lang.Object ref = model_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + model_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The name of the AutoML model that synthesizes the custom voice.
+     * 
+ * + * + * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The model to set. + * @return This builder for chaining. + */ + public Builder setModel(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + model_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the AutoML model that synthesizes the custom voice.
+     * 
+ * + * + * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearModel() { + + model_ = getDefaultInstance().getModel(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The name of the AutoML model that synthesizes the custom voice.
+     * 
+ * + * + * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for model to set. + * @return This builder for chaining. + */ + public Builder setModelBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + model_ = value; + onChanged(); + return this; + } + + private int reportedUsage_ = 0; + /** + * + * + *
+     * Optional. The usage of the synthesized audio to be reported.
+     * 
+ * + * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for reportedUsage. + */ + @java.lang.Override + public int getReportedUsageValue() { + return reportedUsage_; + } + /** + * + * + *
+     * Optional. The usage of the synthesized audio to be reported.
+     * 
+ * + * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for reportedUsage to set. + * @return This builder for chaining. + */ + public Builder setReportedUsageValue(int value) { + + reportedUsage_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The usage of the synthesized audio to be reported.
+     * 
+ * + * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The reportedUsage. + */ + @java.lang.Override + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage + getReportedUsage() { + @SuppressWarnings("deprecation") + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage result = + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage.valueOf( + reportedUsage_); + return result == null + ? com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage.UNRECOGNIZED + : result; + } + /** + * + * + *
+     * Optional. The usage of the synthesized audio to be reported.
+     * 
+ * + * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The reportedUsage to set. + * @return This builder for chaining. + */ + public Builder setReportedUsage( + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage value) { + if (value == null) { + throw new NullPointerException(); + } + + reportedUsage_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The usage of the synthesized audio to be reported.
+     * 
+ * + * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearReportedUsage() { + + reportedUsage_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1beta1.CustomVoiceParams) + } + + // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1beta1.CustomVoiceParams) + private static final com.google.cloud.texttospeech.v1beta1.CustomVoiceParams DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1beta1.CustomVoiceParams(); + } + + public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CustomVoiceParams parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CustomVoiceParams(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/CustomVoiceParamsOrBuilder.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/CustomVoiceParamsOrBuilder.java new file mode 100644 index 00000000..7d9a5f16 --- /dev/null +++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/CustomVoiceParamsOrBuilder.java @@ -0,0 +1,83 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/texttospeech/v1beta1/cloud_tts.proto + +package com.google.cloud.texttospeech.v1beta1; + +public interface CustomVoiceParamsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.texttospeech.v1beta1.CustomVoiceParams) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The name of the AutoML model that synthesizes the custom voice.
+   * 
+ * + * + * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The model. + */ + java.lang.String getModel(); + /** + * + * + *
+   * Required. The name of the AutoML model that synthesizes the custom voice.
+   * 
+ * + * + * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for model. + */ + com.google.protobuf.ByteString getModelBytes(); + + /** + * + * + *
+   * Optional. The usage of the synthesized audio to be reported.
+   * 
+ * + * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for reportedUsage. + */ + int getReportedUsageValue(); + /** + * + * + *
+   * Optional. The usage of the synthesized audio to be reported.
+   * 
+ * + * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The reportedUsage. + */ + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage getReportedUsage(); +} diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequest.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequest.java index 80847cb3..e429442c 100644 --- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequest.java +++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequest.java @@ -121,11 +121,11 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. * If not specified, the API will return all supported voices. * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. E.g. when specifying "en-NZ", you will - * get supported "en-NZ" voices; when specifying "no", you will get supported - * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh" - * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get - * supported "yue-hk" voices. + * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will + * get supported `"en-NZ"` voices; when specifying `"no"`, you will get + * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; + * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying + * `"zh-hk"` will also get supported `"yue-hk"` voices. * * * string language_code = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -152,11 +152,11 @@ public java.lang.String getLanguageCode() { * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. * If not specified, the API will return all supported voices. * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. E.g. when specifying "en-NZ", you will - * get supported "en-NZ" voices; when specifying "no", you will get supported - * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh" - * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get - * supported "yue-hk" voices. + * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will + * get supported `"en-NZ"` voices; when specifying `"no"`, you will get + * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; + * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying + * `"zh-hk"` will also get supported `"yue-hk"` voices. * * * string language_code = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -504,11 +504,11 @@ public Builder mergeFrom( * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. * If not specified, the API will return all supported voices. * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. E.g. when specifying "en-NZ", you will - * get supported "en-NZ" voices; when specifying "no", you will get supported - * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh" - * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get - * supported "yue-hk" voices. 
+ * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will + * get supported `"en-NZ"` voices; when specifying `"no"`, you will get + * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; + * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying + * `"zh-hk"` will also get supported `"yue-hk"` voices. * * * string language_code = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -534,11 +534,11 @@ public java.lang.String getLanguageCode() { * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. * If not specified, the API will return all supported voices. * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. E.g. when specifying "en-NZ", you will - * get supported "en-NZ" voices; when specifying "no", you will get supported - * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh" - * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get - * supported "yue-hk" voices. + * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will + * get supported `"en-NZ"` voices; when specifying `"no"`, you will get + * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; + * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying + * `"zh-hk"` will also get supported `"yue-hk"` voices. * * * string language_code = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -564,11 +564,11 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() { * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. * If not specified, the API will return all supported voices. * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. E.g. when specifying "en-NZ", you will - * get supported "en-NZ" voices; when specifying "no", you will get supported - * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh" - * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get - * supported "yue-hk" voices. + * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will + * get supported `"en-NZ"` voices; when specifying `"no"`, you will get + * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; + * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying + * `"zh-hk"` will also get supported `"yue-hk"` voices. * * * string language_code = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -593,11 +593,11 @@ public Builder setLanguageCode(java.lang.String value) { * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. * If not specified, the API will return all supported voices. * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. E.g. when specifying "en-NZ", you will - * get supported "en-NZ" voices; when specifying "no", you will get supported - * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh" - * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get - * supported "yue-hk" voices. + * to synthesize this language_code. E.g. 
when specifying `"en-NZ"`, you will + * get supported `"en-NZ"` voices; when specifying `"no"`, you will get + * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; + * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying + * `"zh-hk"` will also get supported `"yue-hk"` voices. * * * string language_code = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -618,11 +618,11 @@ public Builder clearLanguageCode() { * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. * If not specified, the API will return all supported voices. * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. E.g. when specifying "en-NZ", you will - * get supported "en-NZ" voices; when specifying "no", you will get supported - * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh" - * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get - * supported "yue-hk" voices. + * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will + * get supported `"en-NZ"` voices; when specifying `"no"`, you will get + * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; + * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying + * `"zh-hk"` will also get supported `"yue-hk"` voices. * * * string language_code = 1 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequestOrBuilder.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequestOrBuilder.java index 1bc59e44..66177312 100644 --- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequestOrBuilder.java +++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequestOrBuilder.java @@ -31,11 +31,11 @@ public interface ListVoicesRequestOrBuilder * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. * If not specified, the API will return all supported voices. * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. E.g. when specifying "en-NZ", you will - * get supported "en-NZ" voices; when specifying "no", you will get supported - * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh" - * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get - * supported "yue-hk" voices. + * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will + * get supported `"en-NZ"` voices; when specifying `"no"`, you will get + * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; + * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying + * `"zh-hk"` will also get supported `"yue-hk"` voices. * * * string language_code = 1 [(.google.api.field_behavior) = OPTIONAL]; @@ -51,11 +51,11 @@ public interface ListVoicesRequestOrBuilder * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. * If not specified, the API will return all supported voices. * If specified, the ListVoices call will only return voices that can be used - * to synthesize this language_code. E.g. 
when specifying "en-NZ", you will - * get supported "en-NZ" voices; when specifying "no", you will get supported - * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh" - * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get - * supported "yue-hk" voices. + * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will + * get supported `"en-NZ"` voices; when specifying `"no"`, you will get + * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; + * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying + * `"zh-hk"` will also get supported `"yue-hk"` voices. * * * string language_code = 1 [(.google.api.field_behavior) = OPTIONAL]; diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechProto.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechProto.java index b399a583..9acd6a6a 100644 --- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechProto.java +++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechProto.java @@ -55,6 +55,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -76,66 +80,76 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "d_tts.proto\022!google.cloud.texttospeech.v" + "1beta1\032\034google/api/annotations.proto\032\027go" + "ogle/api/client.proto\032\037google/api/field_" - + "behavior.proto\"/\n\021ListVoicesRequest\022\032\n\rl" - + "anguage_code\030\001 \001(\tB\003\340A\001\"N\n\022ListVoicesRes" - + "ponse\0228\n\006voices\030\001 \003(\0132(.google.cloud.tex" - + "ttospeech.v1beta1.Voice\"\231\001\n\005Voice\022\026\n\016lan" - + "guage_codes\030\001 \003(\t\022\014\n\004name\030\002 \001(\t\022G\n\013ssml_" - + "gender\030\003 \001(\01622.google.cloud.texttospeech" - + ".v1beta1.SsmlVoiceGender\022!\n\031natural_samp" - + "le_rate_hertz\030\004 \001(\005\"\240\003\n\027SynthesizeSpeech" - + "Request\022E\n\005input\030\001 \001(\01321.google.cloud.te" - + "xttospeech.v1beta1.SynthesisInputB\003\340A\002\022K" - + "\n\005voice\030\002 \001(\01327.google.cloud.texttospeec" - + "h.v1beta1.VoiceSelectionParamsB\003\340A\002\022I\n\014a" - + "udio_config\030\003 \001(\0132..google.cloud.texttos" - + "peech.v1beta1.AudioConfigB\003\340A\002\022f\n\024enable" - + "_time_pointing\030\004 \003(\0162H.google.cloud.text" - + "tospeech.v1beta1.SynthesizeSpeechRequest" - + ".TimepointType\">\n\rTimepointType\022\036\n\032TIMEP" - + "OINT_TYPE_UNSPECIFIED\020\000\022\r\n\tSSML_MARK\020\001\"@" - + "\n\016SynthesisInput\022\016\n\004text\030\001 
\001(\tH\000\022\016\n\004ssml" - + "\030\002 \001(\tH\000B\016\n\014input_source\"\211\001\n\024VoiceSelect" - + "ionParams\022\032\n\rlanguage_code\030\001 \001(\tB\003\340A\002\022\014\n" - + "\004name\030\002 \001(\t\022G\n\013ssml_gender\030\003 \001(\01622.googl" - + "e.cloud.texttospeech.v1beta1.SsmlVoiceGe" - + "nder\"\366\001\n\013AudioConfig\022M\n\016audio_encoding\030\001" - + " \001(\01620.google.cloud.texttospeech.v1beta1" - + ".AudioEncodingB\003\340A\002\022\035\n\rspeaking_rate\030\002 \001" - + "(\001B\006\340A\004\340A\001\022\025\n\005pitch\030\003 \001(\001B\006\340A\004\340A\001\022\036\n\016vol" - + "ume_gain_db\030\004 \001(\001B\006\340A\004\340A\001\022\036\n\021sample_rate" - + "_hertz\030\005 \001(\005B\003\340A\001\022\"\n\022effects_profile_id\030" - + "\006 \003(\tB\006\340A\004\340A\001\"\271\001\n\030SynthesizeSpeechRespon" - + "se\022\025\n\raudio_content\030\001 \001(\014\022@\n\ntimepoints\030" - + "\002 \003(\0132,.google.cloud.texttospeech.v1beta" - + "1.Timepoint\022D\n\014audio_config\030\004 \001(\0132..goog" - + "le.cloud.texttospeech.v1beta1.AudioConfi" - + "g\"4\n\tTimepoint\022\021\n\tmark_name\030\004 \001(\t\022\024\n\014tim" - + "e_seconds\030\003 \001(\001*W\n\017SsmlVoiceGender\022!\n\035SS" - + "ML_VOICE_GENDER_UNSPECIFIED\020\000\022\010\n\004MALE\020\001\022" - + "\n\n\006FEMALE\020\002\022\013\n\007NEUTRAL\020\003*z\n\rAudioEncodin" - + "g\022\036\n\032AUDIO_ENCODING_UNSPECIFIED\020\000\022\014\n\010LIN" - + "EAR16\020\001\022\007\n\003MP3\020\002\022\017\n\013MP3_64_KBPS\020\004\022\014\n\010OGG" - + "_OPUS\020\003\022\t\n\005MULAW\020\005\022\010\n\004ALAW\020\0062\322\003\n\014TextToS" - + "peech\022\242\001\n\nListVoices\0224.google.cloud.text" - + "tospeech.v1beta1.ListVoicesRequest\0325.goo" - + "gle.cloud.texttospeech.v1beta1.ListVoice" - + "sResponse\"\'\202\323\344\223\002\021\022\017/v1beta1/voices\332A\rlan" - + "guage_code\022\313\001\n\020SynthesizeSpeech\022:.google" - + ".cloud.texttospeech.v1beta1.SynthesizeSp" - + "eechRequest\032;.google.cloud.texttospeech." 
- + "v1beta1.SynthesizeSpeechResponse\">\202\323\344\223\002\035" - + "\"\030/v1beta1/text:synthesize:\001*\332A\030input,vo" - + "ice,audio_config\032O\312A\033texttospeech.google" - + "apis.com\322A.https://www.googleapis.com/au" - + "th/cloud-platformB\375\001\n%com.google.cloud.t" - + "exttospeech.v1beta1B\021TextToSpeechProtoP\001" - + "ZMgoogle.golang.org/genproto/googleapis/" - + "cloud/texttospeech/v1beta1;texttospeech\370" - + "\001\001\252\002!Google.Cloud.TextToSpeech.V1Beta1\312\002" - + "!Google\\Cloud\\TextToSpeech\\V1beta1\352\002$Goo" - + "gle::Cloud::TextToSpeech::V1beta1b\006proto" - + "3" + + "behavior.proto\032\031google/api/resource.prot" + + "o\"/\n\021ListVoicesRequest\022\032\n\rlanguage_code\030" + + "\001 \001(\tB\003\340A\001\"N\n\022ListVoicesResponse\0228\n\006voic" + + "es\030\001 \003(\0132(.google.cloud.texttospeech.v1b" + + "eta1.Voice\"\231\001\n\005Voice\022\026\n\016language_codes\030\001" + + " \003(\t\022\014\n\004name\030\002 \001(\t\022G\n\013ssml_gender\030\003 \001(\0162" + + "2.google.cloud.texttospeech.v1beta1.Ssml" + + "VoiceGender\022!\n\031natural_sample_rate_hertz" + + "\030\004 \001(\005\"\240\003\n\027SynthesizeSpeechRequest\022E\n\005in" + + "put\030\001 \001(\01321.google.cloud.texttospeech.v1" + + "beta1.SynthesisInputB\003\340A\002\022K\n\005voice\030\002 \001(\013" + + "27.google.cloud.texttospeech.v1beta1.Voi" + + "ceSelectionParamsB\003\340A\002\022I\n\014audio_config\030\003" + + " \001(\0132..google.cloud.texttospeech.v1beta1" + + ".AudioConfigB\003\340A\002\022f\n\024enable_time_pointin" + + "g\030\004 \003(\0162H.google.cloud.texttospeech.v1be" + + "ta1.SynthesizeSpeechRequest.TimepointTyp" + + "e\">\n\rTimepointType\022\036\n\032TIMEPOINT_TYPE_UNS" + + "PECIFIED\020\000\022\r\n\tSSML_MARK\020\001\"@\n\016SynthesisIn" + + "put\022\016\n\004text\030\001 \001(\tH\000\022\016\n\004ssml\030\002 \001(\tH\000B\016\n\014i" + + "nput_source\"\325\001\n\024VoiceSelectionParams\022\032\n\r" + + "language_code\030\001 \001(\tB\003\340A\002\022\014\n\004name\030\002 \001(\t\022G" + + "\n\013ssml_gender\030\003 \001(\01622.google.cloud.textt" + + "ospeech.v1beta1.SsmlVoiceGender\022J\n\014custo" + + "m_voice\030\004 \001(\01324.google.cloud.texttospeec" + + "h.v1beta1.CustomVoiceParams\"\366\001\n\013AudioCon" + + "fig\022M\n\016audio_encoding\030\001 \001(\01620.google.clo" + + "ud.texttospeech.v1beta1.AudioEncodingB\003\340" + + "A\002\022\035\n\rspeaking_rate\030\002 \001(\001B\006\340A\004\340A\001\022\025\n\005pit" + + "ch\030\003 \001(\001B\006\340A\004\340A\001\022\036\n\016volume_gain_db\030\004 \001(\001" + + "B\006\340A\004\340A\001\022\036\n\021sample_rate_hertz\030\005 \001(\005B\003\340A\001" + + "\022\"\n\022effects_profile_id\030\006 \003(\tB\006\340A\004\340A\001\"\364\001\n" + + "\021CustomVoiceParams\0222\n\005model\030\001 \001(\tB#\340A\002\372A" + + "\035\n\033automl.googleapis.com/Model\022_\n\016report" + + "ed_usage\030\003 \001(\0162B.google.cloud.texttospee" + + "ch.v1beta1.CustomVoiceParams.ReportedUsa" + + "geB\003\340A\001\"J\n\rReportedUsage\022\036\n\032REPORTED_USA" + + "GE_UNSPECIFIED\020\000\022\014\n\010REALTIME\020\001\022\013\n\007OFFLIN" + + "E\020\002\"\271\001\n\030SynthesizeSpeechResponse\022\025\n\raudi" + + "o_content\030\001 \001(\014\022@\n\ntimepoints\030\002 \003(\0132,.go" + + "ogle.cloud.texttospeech.v1beta1.Timepoin" + + "t\022D\n\014audio_config\030\004 \001(\0132..google.cloud.t" + + "exttospeech.v1beta1.AudioConfig\"4\n\tTimep" + + "oint\022\021\n\tmark_name\030\004 
\001(\t\022\024\n\014time_seconds\030" + + "\003 \001(\001*W\n\017SsmlVoiceGender\022!\n\035SSML_VOICE_G" + + "ENDER_UNSPECIFIED\020\000\022\010\n\004MALE\020\001\022\n\n\006FEMALE\020" + + "\002\022\013\n\007NEUTRAL\020\003*z\n\rAudioEncoding\022\036\n\032AUDIO" + + "_ENCODING_UNSPECIFIED\020\000\022\014\n\010LINEAR16\020\001\022\007\n" + + "\003MP3\020\002\022\017\n\013MP3_64_KBPS\020\004\022\014\n\010OGG_OPUS\020\003\022\t\n" + + "\005MULAW\020\005\022\010\n\004ALAW\020\0062\322\003\n\014TextToSpeech\022\242\001\n\n" + + "ListVoices\0224.google.cloud.texttospeech.v" + + "1beta1.ListVoicesRequest\0325.google.cloud." + + "texttospeech.v1beta1.ListVoicesResponse\"" + + "\'\202\323\344\223\002\021\022\017/v1beta1/voices\332A\rlanguage_code" + + "\022\313\001\n\020SynthesizeSpeech\022:.google.cloud.tex" + + "ttospeech.v1beta1.SynthesizeSpeechReques" + + "t\032;.google.cloud.texttospeech.v1beta1.Sy" + + "nthesizeSpeechResponse\">\202\323\344\223\002\035\"\030/v1beta1" + + "/text:synthesize:\001*\332A\030input,voice,audio_" + + "config\032O\312A\033texttospeech.googleapis.com\322A" + + ".https://www.googleapis.com/auth/cloud-p" + + "latformB\325\002\n%com.google.cloud.texttospeec" + + "h.v1beta1B\021TextToSpeechProtoP\001ZMgoogle.g" + + "olang.org/genproto/googleapis/cloud/text" + + "tospeech/v1beta1;texttospeech\370\001\001\252\002!Googl" + + "e.Cloud.TextToSpeech.V1Beta1\312\002!Google\\Cl" + + "oud\\TextToSpeech\\V1beta1\352\002$Google::Cloud" + + "::TextToSpeech::V1beta1\352AU\n\033automl.googl" + + "eapis.com/Model\0226projects/{project}/loca" + + "tions/{location}/models/{model}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -144,6 +158,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.AnnotationsProto.getDescriptor(), com.google.api.ClientProto.getDescriptor(), com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), }); internal_static_google_cloud_texttospeech_v1beta1_ListVoicesRequest_descriptor = getDescriptor().getMessageTypes().get(0); @@ -191,7 +206,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_texttospeech_v1beta1_VoiceSelectionParams_descriptor, new java.lang.String[] { - "LanguageCode", "Name", "SsmlGender", + "LanguageCode", "Name", "SsmlGender", "CustomVoice", }); internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_descriptor = getDescriptor().getMessageTypes().get(6); @@ -206,8 +221,16 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "SampleRateHertz", "EffectsProfileId", }); - internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor = + internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_descriptor = getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_descriptor, + new java.lang.String[] { + "Model", "ReportedUsage", + }); + internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor = + getDescriptor().getMessageTypes().get(8); internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor, @@ -215,7 +238,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "AudioContent", "Timepoints", "AudioConfig", }); internal_static_google_cloud_texttospeech_v1beta1_Timepoint_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(9); internal_static_google_cloud_texttospeech_v1beta1_Timepoint_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_texttospeech_v1beta1_Timepoint_descriptor, @@ -229,11 +252,14 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { registry.add(com.google.api.AnnotationsProto.http); registry.add(com.google.api.ClientProto.methodSignature); registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resourceDefinition); + registry.add(com.google.api.ResourceProto.resourceReference); com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( descriptor, registry); com.google.api.AnnotationsProto.getDescriptor(); com.google.api.ClientProto.getDescriptor(); com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParams.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParams.java index e5919935..fd3483d0 100644 --- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParams.java +++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParams.java @@ -91,6 +91,23 @@ private VoiceSelectionParams( int rawValue = input.readEnum(); ssmlGender_ = rawValue; + break; + } + case 34: + { + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder subBuilder = null; + if (customVoice_ != null) { + subBuilder = customVoice_.toBuilder(); + } + customVoice_ = + input.readMessage( + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(customVoice_); + customVoice_ = subBuilder.buildPartial(); + } + break; } default: @@ -293,6 +310,61 @@ public com.google.cloud.texttospeech.v1beta1.SsmlVoiceGender getSsmlGender() { : result; } + public static final int CUSTOM_VOICE_FIELD_NUMBER = 4; + private com.google.cloud.texttospeech.v1beta1.CustomVoiceParams customVoice_; + /** + * + * + *
+   * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+   * the service will choose the custom voice matching the specified
+   * configuration.
+   * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + * + * @return Whether the customVoice field is set. + */ + @java.lang.Override + public boolean hasCustomVoice() { + return customVoice_ != null; + } + /** + * + * + *
+   * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+   * the service will choose the custom voice matching the specified
+   * configuration.
+   * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + * + * @return The customVoice. + */ + @java.lang.Override + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams getCustomVoice() { + return customVoice_ == null + ? com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.getDefaultInstance() + : customVoice_; + } + /** + * + * + *
+   * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+   * the service will choose the custom voice matching the specified
+   * configuration.
+   * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + */ + @java.lang.Override + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder + getCustomVoiceOrBuilder() { + return getCustomVoice(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -318,6 +390,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io .getNumber()) { output.writeEnum(3, ssmlGender_); } + if (customVoice_ != null) { + output.writeMessage(4, getCustomVoice()); + } unknownFields.writeTo(output); } @@ -338,6 +413,9 @@ public int getSerializedSize() { .getNumber()) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, ssmlGender_); } + if (customVoice_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCustomVoice()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -357,6 +435,10 @@ public boolean equals(final java.lang.Object obj) { if (!getLanguageCode().equals(other.getLanguageCode())) return false; if (!getName().equals(other.getName())) return false; if (ssmlGender_ != other.ssmlGender_) return false; + if (hasCustomVoice() != other.hasCustomVoice()) return false; + if (hasCustomVoice()) { + if (!getCustomVoice().equals(other.getCustomVoice())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -374,6 +456,10 @@ public int hashCode() { hash = (53 * hash) + getName().hashCode(); hash = (37 * hash) + SSML_GENDER_FIELD_NUMBER; hash = (53 * hash) + ssmlGender_; + if (hasCustomVoice()) { + hash = (37 * hash) + CUSTOM_VOICE_FIELD_NUMBER; + hash = (53 * hash) + getCustomVoice().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -526,6 +612,12 @@ public Builder clear() { ssmlGender_ = 0; + if (customVoiceBuilder_ == null) { + customVoice_ = null; + } else { + customVoice_ = null; + customVoiceBuilder_ = null; + } return this; } @@ -556,6 +648,11 @@ public com.google.cloud.texttospeech.v1beta1.VoiceSelectionParams buildPartial() result.languageCode_ = languageCode_; result.name_ = name_; result.ssmlGender_ = ssmlGender_; + if (customVoiceBuilder_ == null) { + result.customVoice_ = customVoice_; + } else { + result.customVoice_ = customVoiceBuilder_.build(); + } onBuilt(); return result; } @@ -617,6 +714,9 @@ public Builder mergeFrom(com.google.cloud.texttospeech.v1beta1.VoiceSelectionPar if (other.ssmlGender_ != 0) { setSsmlGenderValue(other.getSsmlGenderValue()); } + if (other.hasCustomVoice()) { + mergeCustomVoice(other.getCustomVoice()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1027,6 +1127,211 @@ public Builder clearSsmlGender() { return this; } + private com.google.cloud.texttospeech.v1beta1.CustomVoiceParams customVoice_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams, + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder, + com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder> + customVoiceBuilder_; + /** + * + * + *
+     * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+     * the service will choose the custom voice matching the specified
+     * configuration.
+     * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + * + * @return Whether the customVoice field is set. + */ + public boolean hasCustomVoice() { + return customVoiceBuilder_ != null || customVoice_ != null; + } + /** + * + * + *
+     * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+     * the service will choose the custom voice matching the specified
+     * configuration.
+     * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + * + * @return The customVoice. + */ + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams getCustomVoice() { + if (customVoiceBuilder_ == null) { + return customVoice_ == null + ? com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.getDefaultInstance() + : customVoice_; + } else { + return customVoiceBuilder_.getMessage(); + } + } + /** + * + * + *
+     * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+     * the service will choose the custom voice matching the specified
+     * configuration.
+     * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + */ + public Builder setCustomVoice(com.google.cloud.texttospeech.v1beta1.CustomVoiceParams value) { + if (customVoiceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + customVoice_ = value; + onChanged(); + } else { + customVoiceBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+     * the service will choose the custom voice matching the specified
+     * configuration.
+     * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + */ + public Builder setCustomVoice( + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder builderForValue) { + if (customVoiceBuilder_ == null) { + customVoice_ = builderForValue.build(); + onChanged(); + } else { + customVoiceBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+     * the service will choose the custom voice matching the specified
+     * configuration.
+     * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + */ + public Builder mergeCustomVoice(com.google.cloud.texttospeech.v1beta1.CustomVoiceParams value) { + if (customVoiceBuilder_ == null) { + if (customVoice_ != null) { + customVoice_ = + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.newBuilder(customVoice_) + .mergeFrom(value) + .buildPartial(); + } else { + customVoice_ = value; + } + onChanged(); + } else { + customVoiceBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+     * the service will choose the custom voice matching the specified
+     * configuration.
+     * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + */ + public Builder clearCustomVoice() { + if (customVoiceBuilder_ == null) { + customVoice_ = null; + onChanged(); + } else { + customVoice_ = null; + customVoiceBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+     * the service will choose the custom voice matching the specified
+     * configuration.
+     * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + */ + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder getCustomVoiceBuilder() { + + onChanged(); + return getCustomVoiceFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+     * the service will choose the custom voice matching the specified
+     * configuration.
+     * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + */ + public com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder + getCustomVoiceOrBuilder() { + if (customVoiceBuilder_ != null) { + return customVoiceBuilder_.getMessageOrBuilder(); + } else { + return customVoice_ == null + ? com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.getDefaultInstance() + : customVoice_; + } + } + /** + * + * + *
+     * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+     * the service will choose the custom voice matching the specified
+     * configuration.
+     * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams, + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder, + com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder> + getCustomVoiceFieldBuilder() { + if (customVoiceBuilder_ == null) { + customVoiceBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams, + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder, + com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder>( + getCustomVoice(), getParentForChildren(), isClean()); + customVoice_ = null; + } + return customVoiceBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParamsOrBuilder.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParamsOrBuilder.java index f222fe0b..d1c6eb1d 100644 --- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParamsOrBuilder.java +++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParamsOrBuilder.java @@ -127,4 +127,45 @@ public interface VoiceSelectionParamsOrBuilder * @return The ssmlGender. */ com.google.cloud.texttospeech.v1beta1.SsmlVoiceGender getSsmlGender(); + + /** + * + * + *
+   * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+   * the service will choose the custom voice matching the specified
+   * configuration.
+   * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + * + * @return Whether the customVoice field is set. + */ + boolean hasCustomVoice(); + /** + * + * + *
+   * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+   * the service will choose the custom voice matching the specified
+   * configuration.
+   * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + * + * @return The customVoice. + */ + com.google.cloud.texttospeech.v1beta1.CustomVoiceParams getCustomVoice(); + /** + * + * + *
+   * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+   * the service will choose the custom voice matching the specified
+   * configuration.
+   * 
+ * + * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4; + */ + com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder getCustomVoiceOrBuilder(); } diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/proto/google/cloud/texttospeech/v1beta1/cloud_tts.proto b/proto-google-cloud-texttospeech-v1beta1/src/main/proto/google/cloud/texttospeech/v1beta1/cloud_tts.proto index a70773d7..55a62bf8 100644 --- a/proto-google-cloud-texttospeech-v1beta1/src/main/proto/google/cloud/texttospeech/v1beta1/cloud_tts.proto +++ b/proto-google-cloud-texttospeech-v1beta1/src/main/proto/google/cloud/texttospeech/v1beta1/cloud_tts.proto @@ -19,6 +19,7 @@ package google.cloud.texttospeech.v1beta1; import "google/api/annotations.proto"; import "google/api/client.proto"; import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; option cc_enable_arenas = true; option csharp_namespace = "Google.Cloud.TextToSpeech.V1Beta1"; @@ -28,6 +29,10 @@ option java_outer_classname = "TextToSpeechProto"; option java_package = "com.google.cloud.texttospeech.v1beta1"; option php_namespace = "Google\\Cloud\\TextToSpeech\\V1beta1"; option ruby_package = "Google::Cloud::TextToSpeech::V1beta1"; +option (google.api.resource_definition) = { + type: "automl.googleapis.com/Model" + pattern: "projects/{project}/locations/{location}/models/{model}" +}; // Service that implements Google Cloud Text-to-Speech API. service TextToSpeech { @@ -59,11 +64,11 @@ message ListVoicesRequest { // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. // If not specified, the API will return all supported voices. // If specified, the ListVoices call will only return voices that can be used - // to synthesize this language_code. E.g. when specifying "en-NZ", you will - // get supported "en-NZ" voices; when specifying "no", you will get supported - // "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh" - // will also get supported "cmn-\*" voices; specifying "zh-hk" will also get - // supported "yue-hk" voices. + // to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will + // get supported `"en-NZ"` voices; when specifying `"no"`, you will get + // supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; + // specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying + // `"zh-hk"` will also get supported `"yue-hk"` voices. string language_code = 1 [(google.api.field_behavior) = OPTIONAL]; } @@ -208,6 +213,11 @@ message VoiceSelectionParams { // voice of the appropriate gender is not available, the synthesizer should // substitute a voice with a different gender rather than failing the request. SsmlVoiceGender ssml_gender = 3; + + // The configuration for a custom voice. If [CustomVoiceParams.model] is set, + // the service will choose the custom voice matching the specified + // configuration. + CustomVoiceParams custom_voice = 4; } // Description of audio data to be synthesized. @@ -266,6 +276,37 @@ message AudioConfig { ]; } +// Description of the custom voice to be synthesized. +message CustomVoiceParams { + // The usage of the synthesized audio. You must report your honest and + // correct usage of the service as it's regulated by contract and will cause + // significant difference in billing. + enum ReportedUsage { + // Request with reported usage unspecified will be rejected. + REPORTED_USAGE_UNSPECIFIED = 0; + + // For scenarios where the synthesized audio is not downloadable and can + // only be used once. 
For example, real-time request in IVR system. + REALTIME = 1; + + // For scenarios where the synthesized audio is downloadable and can be + // reused. For example, the synthesized audio is downloaded, stored in + // customer service system and played repeatedly. + OFFLINE = 2; + } + + // Required. The name of the AutoML model that synthesizes the custom voice. + string model = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; + + // Optional. The usage of the synthesized audio to be reported. + ReportedUsage reported_usage = 3 [(google.api.field_behavior) = OPTIONAL]; +} + // The message returned to the client by the `SynthesizeSpeech` method. message SynthesizeSpeechResponse { // The audio data bytes encoded as specified in the request, including the
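
Taken together, the changes above add a `custom_voice` selector to `VoiceSelectionParams` and a new `CustomVoiceParams` message in v1beta1. Below is a minimal usage sketch, not part of the generated sources: the project and AutoML model resource name are hypothetical placeholders, the builder methods (`setCustomVoice`, `setModel`, `setReportedUsage`) follow from the generated code in this diff, and the three-argument `TextToSpeechClient.synthesizeSpeech(input, voice, audioConfig)` overload already exists on the v1beta1 client.

import com.google.cloud.texttospeech.v1beta1.AudioConfig;
import com.google.cloud.texttospeech.v1beta1.AudioEncoding;
import com.google.cloud.texttospeech.v1beta1.CustomVoiceParams;
import com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage;
import com.google.cloud.texttospeech.v1beta1.SynthesisInput;
import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse;
import com.google.cloud.texttospeech.v1beta1.TextToSpeechClient;
import com.google.cloud.texttospeech.v1beta1.VoiceSelectionParams;

public class CustomVoiceExample {
  public static void main(String[] args) throws Exception {
    try (TextToSpeechClient client = TextToSpeechClient.create()) {
      // Plain-text input to synthesize.
      SynthesisInput input =
          SynthesisInput.newBuilder().setText("Hello from a custom voice").build();

      // Select a custom voice by referencing an AutoML model resource
      // (hypothetical name) and reporting the intended usage, as required
      // by the ReportedUsage enum added in this change.
      CustomVoiceParams customVoice =
          CustomVoiceParams.newBuilder()
              .setModel("projects/my-project/locations/us-central1/models/my-custom-voice")
              .setReportedUsage(ReportedUsage.REALTIME)
              .build();
      VoiceSelectionParams voice =
          VoiceSelectionParams.newBuilder()
              .setLanguageCode("en-US")
              .setCustomVoice(customVoice)
              .build();

      AudioConfig audioConfig =
          AudioConfig.newBuilder().setAudioEncoding(AudioEncoding.LINEAR16).build();

      // Existing three-argument overload; the response carries the audio bytes.
      SynthesizeSpeechResponse response = client.synthesizeSpeech(input, voice, audioConfig);
      System.out.println("Synthesized " + response.getAudioContent().size() + " bytes");
    }
  }
}

If `custom_voice` is left unset, voice selection behaves exactly as before; the new field only takes effect when `CustomVoiceParams.model` is populated, matching the field documentation added throughout this diff.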