This repository has been archived by the owner on Dec 15, 2023. It is now read-only.

docs: generate sample code in the Java microgenerator #427

Merged: 1 commit merged on Feb 16, 2021

@@ -37,6 +37,14 @@
* <p>This class provides the ability to make remote calls to the backing service through method
* calls that map to API methods. Sample code to get started:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognitionConfig config = RecognitionConfig.newBuilder().build();
* RecognitionAudio audio = RecognitionAudio.newBuilder().build();
* RecognizeResponse response = speechClient.recognize(config, audio);
* }
* }</pre>
*
* <p>Note: close() needs to be called on the SpeechClient object to clean up resources such as
* threads. In the example above, try-with-resources is used, which automatically calls close().
*
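The generated sample above builds empty RecognitionConfig and RecognitionAudio messages as placeholders. As a rough sketch of a populated synchronous call (the LINEAR16 encoding, 16 kHz sample rate, en-US language code, and gs:// URI below are illustrative assumptions, not part of this change):

try (SpeechClient speechClient = SpeechClient.create()) {
  RecognitionConfig config =
      RecognitionConfig.newBuilder()
          .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16) // assumed 16-bit PCM input
          .setSampleRateHertz(16000)                             // assumed 16 kHz sample rate
          .setLanguageCode("en-US")
          .build();
  RecognitionAudio audio =
      RecognitionAudio.newBuilder()
          .setUri("gs://my-bucket/audio.raw") // hypothetical Cloud Storage object
          .build();
  RecognizeResponse response = speechClient.recognize(config, audio);
  for (SpeechRecognitionResult result : response.getResultsList()) {
    // Print the top alternative of each transcribed segment.
    System.out.println(result.getAlternatives(0).getTranscript());
  }
}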
@@ -149,6 +157,16 @@ public final OperationsClient getOperationsClient() {
* Performs synchronous speech recognition: receive results after all audio has been sent and
* processed.
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognitionConfig config = RecognitionConfig.newBuilder().build();
* RecognitionAudio audio = RecognitionAudio.newBuilder().build();
* RecognizeResponse response = speechClient.recognize(config, audio);
* }
* }</pre>
*
* @param config Required. Provides information to the recognizer that specifies how to process
* the request.
* @param audio Required. The audio data to be recognized.
@@ -165,6 +183,19 @@ public final RecognizeResponse recognize(RecognitionConfig config, RecognitionAu
* Performs synchronous speech recognition: receive results after all audio has been sent and
* processed.
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognizeRequest request =
* RecognizeRequest.newBuilder()
* .setConfig(RecognitionConfig.newBuilder().build())
* .setAudio(RecognitionAudio.newBuilder().build())
* .build();
* RecognizeResponse response = speechClient.recognize(request);
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
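For short clips the audio can also be embedded in the request rather than referenced by URI. A minimal sketch, assuming a local LINEAR16 file at a hypothetical path (uses com.google.protobuf.ByteString and java.nio.file.Files/Paths):

try (SpeechClient speechClient = SpeechClient.create()) {
  ByteString audioBytes =
      ByteString.copyFrom(Files.readAllBytes(Paths.get("/tmp/audio.raw"))); // hypothetical path
  RecognizeRequest request =
      RecognizeRequest.newBuilder()
          .setConfig(
              RecognitionConfig.newBuilder()
                  .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
                  .setSampleRateHertz(16000)
                  .setLanguageCode("en-US")
                  .build())
          .setAudio(RecognitionAudio.newBuilder().setContent(audioBytes).build())
          .build();
  RecognizeResponse response = speechClient.recognize(request);
}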
@@ -178,6 +209,19 @@ public final RecognizeResponse recognize(RecognizeRequest request) {
* processed.
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognizeRequest request =
* RecognizeRequest.newBuilder()
* .setConfig(RecognitionConfig.newBuilder().build())
* .setAudio(RecognitionAudio.newBuilder().build())
* .build();
* ApiFuture<RecognizeResponse> future = speechClient.recognizeCallable().futureCall(request);
* // Do something.
* RecognizeResponse response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallable() {
return stub.recognizeCallable();
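The callable sample above blocks on future.get(). Since futureCall() returns an ApiFuture, a non-blocking callback is also possible; a sketch under that assumption (ApiFutures and ApiFutureCallback come from com.google.api.core, MoreExecutors from Guava, and the placeholder request mirrors the generated sample):

try (SpeechClient speechClient = SpeechClient.create()) {
  RecognizeRequest request =
      RecognizeRequest.newBuilder()
          .setConfig(RecognitionConfig.newBuilder().build())
          .setAudio(RecognitionAudio.newBuilder().build())
          .build();
  ApiFuture<RecognizeResponse> future = speechClient.recognizeCallable().futureCall(request);
  ApiFutures.addCallback(
      future,
      new ApiFutureCallback<RecognizeResponse>() {
        @Override
        public void onSuccess(RecognizeResponse response) {
          System.out.println(response.getResultsCount() + " result(s) returned");
        }

        @Override
        public void onFailure(Throwable t) {
          t.printStackTrace();
        }
      },
      MoreExecutors.directExecutor());
  // The client must stay open until the callback fires; a real application would coordinate this.
}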
@@ -190,6 +234,17 @@ public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallabl
* `LongRunningRecognizeResponse` message. For more information on asynchronous speech
* recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognitionConfig config = RecognitionConfig.newBuilder().build();
* RecognitionAudio audio = RecognitionAudio.newBuilder().build();
* LongRunningRecognizeResponse response =
* speechClient.longRunningRecognizeAsync(config, audio).get();
* }
* }</pre>
*
* @param config Required. Provides information to the recognizer that specifies how to process
* the request.
* @param audio Required. The audio data to be recognized.
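The long-running sample resolves the operation with an unbounded get(). Because longRunningRecognizeAsync returns a gax OperationFuture, a bounded wait is also an option; a small sketch (the three-minute timeout is an arbitrary illustration, java.util.concurrent.TimeUnit is assumed, and the empty builders are placeholders as in the generated sample):

try (SpeechClient speechClient = SpeechClient.create()) {
  RecognitionConfig config = RecognitionConfig.newBuilder().build();
  RecognitionAudio audio = RecognitionAudio.newBuilder().build();
  OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
      speechClient.longRunningRecognizeAsync(config, audio);
  // Wait up to three minutes for the transcription operation to finish.
  LongRunningRecognizeResponse response = future.get(3, TimeUnit.MINUTES);
}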
@@ -209,6 +264,19 @@ public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallabl
* `LongRunningRecognizeResponse` message. For more information on asynchronous speech
* recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* LongRunningRecognizeRequest request =
* LongRunningRecognizeRequest.newBuilder()
* .setConfig(RecognitionConfig.newBuilder().build())
* .setAudio(RecognitionAudio.newBuilder().build())
* .build();
* LongRunningRecognizeResponse response = speechClient.longRunningRecognizeAsync(request).get();
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
@@ -225,6 +293,20 @@ public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallabl
* recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* LongRunningRecognizeRequest request =
* LongRunningRecognizeRequest.newBuilder()
* .setConfig(RecognitionConfig.newBuilder().build())
* .setAudio(RecognitionAudio.newBuilder().build())
* .build();
* OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
* speechClient.longRunningRecognizeOperationCallable().futureCall(request);
* // Do something.
* LongRunningRecognizeResponse response = future.get();
* }
* }</pre>
*/
public final OperationCallable<
LongRunningRecognizeRequest, LongRunningRecognizeResponse, LongRunningRecognizeMetadata>
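The operation callable also exposes the operation's metadata, which for this API carries a progress percentage. A rough sketch of reading it once before blocking for the result (metadata availability depends on gax polling, so treat this as illustrative rather than definitive):

try (SpeechClient speechClient = SpeechClient.create()) {
  LongRunningRecognizeRequest request =
      LongRunningRecognizeRequest.newBuilder()
          .setConfig(RecognitionConfig.newBuilder().build())
          .setAudio(RecognitionAudio.newBuilder().build())
          .build();
  OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
      speechClient.longRunningRecognizeOperationCallable().futureCall(request);
  // Metadata from the first poll; getProgressPercent() reports transcription progress.
  LongRunningRecognizeMetadata metadata = future.getMetadata().get();
  System.out.println("Progress: " + metadata.getProgressPercent() + "%");
  LongRunningRecognizeResponse response = future.get();
}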
@@ -240,6 +322,19 @@ public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallabl
* recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* LongRunningRecognizeRequest request =
* LongRunningRecognizeRequest.newBuilder()
* .setConfig(RecognitionConfig.newBuilder().build())
* .setAudio(RecognitionAudio.newBuilder().build())
* .build();
* ApiFuture<Operation> future = speechClient.longRunningRecognizeCallable().futureCall(request);
* // Do something.
* Operation response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<LongRunningRecognizeRequest, Operation>
longRunningRecognizeCallable() {
@@ -252,6 +347,18 @@ public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallabl
* method is only available via the gRPC API (not REST).
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* BidiStream<StreamingRecognizeRequest, StreamingRecognizeResponse> bidiStream =
* speechClient.streamingRecognizeCallable().call();
* StreamingRecognizeRequest request = StreamingRecognizeRequest.newBuilder().build();
* bidiStream.send(request);
* for (StreamingRecognizeResponse response : bidiStream) {
* // Do something when a response is received.
* }
* }
* }</pre>
*/
public final BidiStreamingCallable<StreamingRecognizeRequest, StreamingRecognizeResponse>
streamingRecognizeCallable() {
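The streaming sample above sends an empty request for brevity. In practice the first request on the stream carries a StreamingRecognitionConfig and later requests carry audio bytes; a sketch under that assumption (encoding, sample rate, language code, and the local file path are illustrative):

try (SpeechClient speechClient = SpeechClient.create()) {
  BidiStream<StreamingRecognizeRequest, StreamingRecognizeResponse> bidiStream =
      speechClient.streamingRecognizeCallable().call();
  StreamingRecognitionConfig streamingConfig =
      StreamingRecognitionConfig.newBuilder()
          .setConfig(
              RecognitionConfig.newBuilder()
                  .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
                  .setSampleRateHertz(16000)
                  .setLanguageCode("en-US")
                  .build())
          .build();
  // First request: configuration only.
  bidiStream.send(
      StreamingRecognizeRequest.newBuilder().setStreamingConfig(streamingConfig).build());
  // Subsequent requests: chunks of raw audio (a single chunk here for simplicity).
  ByteString audioChunk = ByteString.copyFrom(Files.readAllBytes(Paths.get("/tmp/audio.raw")));
  bidiStream.send(StreamingRecognizeRequest.newBuilder().setAudioContent(audioChunk).build());
  bidiStream.closeSend();
  for (StreamingRecognizeResponse response : bidiStream) {
    // Responses may contain interim or final results as audio is processed.
    System.out.println(response.getResultsList());
  }
}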
@@ -22,6 +22,14 @@
* <p>Service Description: Service that implements Google Cloud Speech API.
*
* <p>Sample for SpeechClient:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognitionConfig config = RecognitionConfig.newBuilder().build();
* RecognitionAudio audio = RecognitionAudio.newBuilder().build();
* RecognizeResponse response = speechClient.recognize(config, audio);
* }
* }</pre>
*/
@Generated("by gapic-generator-java")
package com.google.cloud.speech.v1;
@@ -37,6 +37,14 @@
* <p>This class provides the ability to make remote calls to the backing service through method
* calls that map to API methods. Sample code to get started:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognitionConfig config = RecognitionConfig.newBuilder().build();
* RecognitionAudio audio = RecognitionAudio.newBuilder().build();
* RecognizeResponse response = speechClient.recognize(config, audio);
* }
* }</pre>
*
* <p>Note: close() needs to be called on the SpeechClient object to clean up resources such as
* threads. In the example above, try-with-resources is used, which automatically calls close().
*
@@ -150,6 +158,16 @@ public final OperationsClient getOperationsClient() {
* Performs synchronous speech recognition: receive results after all audio has been sent and
* processed.
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognitionConfig config = RecognitionConfig.newBuilder().build();
* RecognitionAudio audio = RecognitionAudio.newBuilder().build();
* RecognizeResponse response = speechClient.recognize(config, audio);
* }
* }</pre>
*
* @param config Required. Provides information to the recognizer that specifies how to process
* the request.
* @param audio Required. The audio data to be recognized.
@@ -166,6 +184,19 @@ public final RecognizeResponse recognize(RecognitionConfig config, RecognitionAu
* Performs synchronous speech recognition: receive results after all audio has been sent and
* processed.
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognizeRequest request =
* RecognizeRequest.newBuilder()
* .setConfig(RecognitionConfig.newBuilder().build())
* .setAudio(RecognitionAudio.newBuilder().build())
* .build();
* RecognizeResponse response = speechClient.recognize(request);
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
@@ -179,6 +210,19 @@ public final RecognizeResponse recognize(RecognizeRequest request) {
* processed.
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognizeRequest request =
* RecognizeRequest.newBuilder()
* .setConfig(RecognitionConfig.newBuilder().build())
* .setAudio(RecognitionAudio.newBuilder().build())
* .build();
* ApiFuture<RecognizeResponse> future = speechClient.recognizeCallable().futureCall(request);
* // Do something.
* RecognizeResponse response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallable() {
return stub.recognizeCallable();
@@ -191,6 +235,17 @@ public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallabl
* `LongRunningRecognizeResponse` message. For more information on asynchronous speech
* recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognitionConfig config = RecognitionConfig.newBuilder().build();
* RecognitionAudio audio = RecognitionAudio.newBuilder().build();
* LongRunningRecognizeResponse response =
* speechClient.longRunningRecognizeAsync(config, audio).get();
* }
* }</pre>
*
* @param config Required. Provides information to the recognizer that specifies how to process
* the request.
* @param audio Required. The audio data to be recognized.
@@ -210,6 +265,19 @@ public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallabl
* `LongRunningRecognizeResponse` message. For more information on asynchronous speech
* recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* LongRunningRecognizeRequest request =
* LongRunningRecognizeRequest.newBuilder()
* .setConfig(RecognitionConfig.newBuilder().build())
* .setAudio(RecognitionAudio.newBuilder().build())
* .build();
* LongRunningRecognizeResponse response = speechClient.longRunningRecognizeAsync(request).get();
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
@@ -226,6 +294,20 @@ public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallabl
* recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* LongRunningRecognizeRequest request =
* LongRunningRecognizeRequest.newBuilder()
* .setConfig(RecognitionConfig.newBuilder().build())
* .setAudio(RecognitionAudio.newBuilder().build())
* .build();
* OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
* speechClient.longRunningRecognizeOperationCallable().futureCall(request);
* // Do something.
* LongRunningRecognizeResponse response = future.get();
* }
* }</pre>
*/
public final OperationCallable<
LongRunningRecognizeRequest, LongRunningRecognizeResponse, LongRunningRecognizeMetadata>
@@ -241,6 +323,19 @@ public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallabl
* recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* LongRunningRecognizeRequest request =
* LongRunningRecognizeRequest.newBuilder()
* .setConfig(RecognitionConfig.newBuilder().build())
* .setAudio(RecognitionAudio.newBuilder().build())
* .build();
* ApiFuture<Operation> future = speechClient.longRunningRecognizeCallable().futureCall(request);
* // Do something.
* Operation response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<LongRunningRecognizeRequest, Operation>
longRunningRecognizeCallable() {
@@ -253,6 +348,18 @@ public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallabl
* method is only available via the gRPC API (not REST).
*
* <p>Sample code:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* BidiStream<StreamingRecognizeRequest, StreamingRecognizeResponse> bidiStream =
* speechClient.streamingRecognizeCallable().call();
* StreamingRecognizeRequest request = StreamingRecognizeRequest.newBuilder().build();
* bidiStream.send(request);
* for (StreamingRecognizeResponse response : bidiStream) {
* // Do something when a response is received.
* }
* }
* }</pre>
*/
public final BidiStreamingCallable<StreamingRecognizeRequest, StreamingRecognizeResponse>
streamingRecognizeCallable() {
@@ -22,6 +22,14 @@
* <p>Service Description: Service that implements Google Cloud Speech API.
*
* <p>Sample for SpeechClient:
*
* <pre>{@code
* try (SpeechClient speechClient = SpeechClient.create()) {
* RecognitionConfig config = RecognitionConfig.newBuilder().build();
* RecognitionAudio audio = RecognitionAudio.newBuilder().build();
* RecognizeResponse response = speechClient.recognize(config, audio);
* }
* }</pre>
*/
@Generated("by gapic-generator-java")
package com.google.cloud.speech.v1p1beta1;
synth.metadata: 10 changes (5 additions, 5 deletions)
@@ -4,23 +4,23 @@
"git": {
"name": ".",
"remote": "https://github.com/googleapis/java-speech.git",
"sha": "11e873e9ff047bb02881be8633c047e091fb27ce"
"sha": "c99e52c7e034fd061f5ce5f16991a9aaa6e46bd6"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
"sha": "91e206bcfeaf8948ea03fe3cb1b7616108496cd3",
"internalRef": "350949863"
"sha": "8d8c008e56f1af31d57f75561e0f1848ffb29eeb",
"internalRef": "356341083"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
"sha": "91e206bcfeaf8948ea03fe3cb1b7616108496cd3",
"internalRef": "350949863"
"sha": "8d8c008e56f1af31d57f75561e0f1848ffb29eeb",
"internalRef": "356341083"
}
},
{