Skip to content
This repository has been archived by the owner on Sep 16, 2023. It is now read-only.

Commit

Permalink
media-translation-sample: translate from file (#15)
Browse files Browse the repository at this point in the history
* media-translation: translate from the file

* formatted code

* added translate from the mic

* added test for translate from file

* made requested changes

* added README, added Translate with main

* deleted Translate and updated README

* added single utterance

* duplicate the resource folder

* added main and removed catch

* trigger tests

* fixed the lint

* media-translation: translate from the file

* formatted code

* trigger kokoro tests

* removed weird file
  • Loading branch information
munkhuushmgl committed Apr 13, 2020
1 parent 3b96cbe commit 70b51e4
Show file tree
Hide file tree
Showing 8 changed files with 340 additions and 0 deletions.
Binary file added samples/install-without-bom/resources/audio.raw
Binary file not shown.
Binary file added samples/snapshot/resources/audio.raw
Binary file not shown.
50 changes: 50 additions & 0 deletions samples/snippets/README.md
@@ -0,0 +1,50 @@
[//]: # "This README.md file is auto-generated, all changes to this file will be lost."
[//]: # "To regenerate it, use `python -m synthtool`."
<img src="https://avatars2.githubusercontent.com/u/2810941?v=3&s=96" alt="Google Cloud Platform logo" title="Google Cloud Platform" align="right" height="96" width="96"/>

# [Cloud Media Translation: Java Samples](https://github.com/googleapis/java-mediatranslation)

[![Open in Cloud Shell][shell_img]][shell_link]



## Table of Contents

* [Build the sample](#build-the-sample)
* [Samples](#samples)


## Build the sample

Install [Maven](http://maven.apache.org/).

Build your project with:

```
mvn clean package -DskipTests
```

## Samples

Please follow the [Set Up Your Project](https://cloud.google.com/media-translation/docs/getting-started#set_up_your_project)
steps in the Quickstart doc to create a project and enable the Google Cloud
Media Translation API. Following those steps, make sure that you
[Set Up a Service Account](https://cloud.google.com/media-translation/docs/common/auth#set_up_a_service_account),
and export the following environment variable:

```
export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json
```

After you have set up authentication, you can translate media.


## Run
Run all tests:
```
mvn clean verify
```

[shell_img]: https://gstatic.com/cloudssh/images/open-btn.png
[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-mediatranslation&page=editor&open_in_editor=samples/snippets/README.md
[product-docs]: https://cloud.google.com/mediatranslation/docs/
1 change: 1 addition & 0 deletions samples/snippets/pom.xml
Expand Up @@ -43,4 +43,5 @@
<scope>test</scope>
</dependency>
</dependencies>

</project>
Binary file added samples/snippets/resources/audio.raw
Binary file not shown.
@@ -0,0 +1,98 @@
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.example.mediatranslation;

// [START media_translation_translate_from_file]

import com.google.api.gax.rpc.BidiStream;
import com.google.cloud.mediatranslation.v1beta1.SpeechTranslationServiceClient;
import com.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechConfig;
import com.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechRequest;
import com.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechResponse;
import com.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechResult;
import com.google.cloud.mediatranslation.v1beta1.TranslateSpeechConfig;
import com.google.protobuf.ByteString;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class TranslateFromFile {

  public static void translateFromFile() throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String filePath = "path/to/audio.raw";
    translateFromFile(filePath);
  }

  /**
   * Streams the contents of a local raw audio file to the Media Translation API and prints
   * partial and final translations to stdout.
   *
   * <p>The audio is expected to be LINEAR16-encoded, 16 kHz, to match the config below.
   *
   * @param filePath path to the raw audio file to translate
   * @throws IOException if the file cannot be read or the client cannot be created
   */
  public static void translateFromFile(String filePath) throws IOException {
    // Initialize client that will be used to send requests. This client only needs to be created
    // once, and can be reused for multiple requests. After completing all of your requests, call
    // the "close" method on the client to safely clean up any remaining background resources.
    try (SpeechTranslationServiceClient client = SpeechTranslationServiceClient.create()) {
      Path path = Paths.get(filePath);
      byte[] content = Files.readAllBytes(path);
      ByteString audioContent = ByteString.copyFrom(content);

      TranslateSpeechConfig audioConfig =
          TranslateSpeechConfig.newBuilder()
              .setAudioEncoding("linear16")
              .setSampleRateHertz(16000)
              .setSourceLanguageCode("en-US")
              .setTargetLanguageCode("fr-FR")
              .build();

      StreamingTranslateSpeechConfig config =
          StreamingTranslateSpeechConfig.newBuilder()
              .setAudioConfig(audioConfig)
              .setSingleUtterance(true)
              .build();

      BidiStream<StreamingTranslateSpeechRequest, StreamingTranslateSpeechResponse> bidiStream =
          client.streamingTranslateSpeechCallable().call();

      // The first request contains the configuration.
      StreamingTranslateSpeechRequest requestConfig =
          StreamingTranslateSpeechRequest.newBuilder().setStreamingConfig(config).build();

      // The second request contains the audio.
      StreamingTranslateSpeechRequest request =
          StreamingTranslateSpeechRequest.newBuilder().setAudioContent(audioContent).build();

      bidiStream.send(requestConfig);
      bidiStream.send(request);
      // Half-close the request stream so the server knows all audio has been sent; otherwise
      // the server may keep waiting for more audio and the response iteration could hang.
      bidiStream.closeSend();

      for (StreamingTranslateSpeechResponse response : bidiStream) {
        // Once the transcription settles, the response contains the
        // is_final result. The other results will be for subsequent portions of
        // the audio.
        StreamingTranslateSpeechResult res = response.getResult();
        String translation = res.getTextTranslationResult().getTranslation();
        String source = res.getRecognitionResult();

        if (res.getTextTranslationResult().getIsFinal()) {
          System.out.println(String.format("\nFinal translation: %s", translation));
          System.out.println(String.format("Final recognition result: %s", source));
          break;
        }
        System.out.println(String.format("\nPartial translation: %s", translation));
        System.out.println(String.format("Partial recognition result: %s", source));
      }
    }
  }
}
// [END media_translation_translate_from_file]
@@ -0,0 +1,135 @@
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.example.mediatranslation;

// [START media_translation_translate_from_mic]

import com.google.api.gax.rpc.ClientStream;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.StreamController;
import com.google.cloud.mediatranslation.v1beta1.SpeechTranslationServiceClient;
import com.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechConfig;
import com.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechRequest;
import com.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechResponse;
import com.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechResult;
import com.google.cloud.mediatranslation.v1beta1.TranslateSpeechConfig;
import com.google.protobuf.ByteString;
import java.io.IOException;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;

public class TranslateFromMic {

  public static void main(String[] args) throws IOException, LineUnavailableException {
    translateFromMic();
  }

  /**
   * Captures LINEAR16, 16 kHz mono audio from the system microphone and streams it to the Media
   * Translation API, printing partial and final translations until interrupted (Ctrl-C).
   *
   * @throws IOException if the client cannot be created
   * @throws LineUnavailableException if the microphone line cannot be opened
   */
  public static void translateFromMic() throws IOException, LineUnavailableException {

    ResponseObserver<StreamingTranslateSpeechResponse> responseObserver = null;

    // Initialize client that will be used to send requests. This client only needs to be created
    // once, and can be reused for multiple requests. After completing all of your requests, call
    // the "close" method on the client to safely clean up any remaining background resources.
    try (SpeechTranslationServiceClient client = SpeechTranslationServiceClient.create()) {
      responseObserver =
          new ResponseObserver<StreamingTranslateSpeechResponse>() {

            @Override
            public void onStart(StreamController controller) {}

            @Override
            public void onResponse(StreamingTranslateSpeechResponse response) {
              StreamingTranslateSpeechResult res = response.getResult();
              String translation = res.getTextTranslationResult().getTranslation();
              String source = res.getRecognitionResult();

              if (res.getTextTranslationResult().getIsFinal()) {
                System.out.println(String.format("\nFinal translation: %s", translation));
                System.out.println(String.format("Final recognition result: %s", source));
              } else {
                System.out.println(String.format("\nPartial translation: %s", translation));
                System.out.println(String.format("Partial recognition result: %s", source));
              }
            }

            @Override
            public void onComplete() {}

            @Override
            public void onError(Throwable t) {
              System.out.println(t);
            }
          };

      ClientStream<StreamingTranslateSpeechRequest> clientStream =
          client.streamingTranslateSpeechCallable().splitCall(responseObserver);

      TranslateSpeechConfig audioConfig =
          TranslateSpeechConfig.newBuilder()
              .setAudioEncoding("linear16")
              .setSourceLanguageCode("en-US")
              .setTargetLanguageCode("es-ES")
              .setSampleRateHertz(16000)
              .build();

      StreamingTranslateSpeechConfig streamingRecognitionConfig =
          StreamingTranslateSpeechConfig.newBuilder().setAudioConfig(audioConfig).build();

      StreamingTranslateSpeechRequest request =
          StreamingTranslateSpeechRequest.newBuilder()
              .setStreamingConfig(streamingRecognitionConfig)
              .build(); // The first request in a streaming call has to be a config

      clientStream.send(request);
      // SampleRate:16000Hz, SampleSizeInBits: 16, Number of channels: 1, Signed: true,
      // bigEndian: false
      AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);
      DataLine.Info targetInfo =
          new DataLine.Info(
              TargetDataLine.class,
              audioFormat); // Set the system information to read from the microphone audio stream

      if (!AudioSystem.isLineSupported(targetInfo)) {
        System.out.println("Microphone not supported");
        System.exit(0);
      }
      // Target data line captures the audio stream the microphone produces.
      TargetDataLine targetDataLine = (TargetDataLine) AudioSystem.getLine(targetInfo);
      targetDataLine.open(audioFormat);
      targetDataLine.start();
      System.out.println("Start speaking... Press Ctrl-C to stop");
      // Audio Input Stream
      AudioInputStream audio = new AudioInputStream(targetDataLine);

      while (true) {
        byte[] data = new byte[6400];
        // read() may return fewer bytes than requested, or -1 at end of stream; only send the
        // bytes that were actually captured instead of a buffer padded with stale zeros.
        int bytesRead = audio.read(data);
        if (bytesRead == -1) {
          break;
        }
        request =
            StreamingTranslateSpeechRequest.newBuilder()
                .setAudioContent(ByteString.copyFrom(data, 0, bytesRead))
                .build();
        clientStream.send(request);
      }
    }
  }
}
// [END media_translation_translate_from_mic]
@@ -0,0 +1,56 @@
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.example.mediatranslation;

import static com.google.common.truth.Truth.assertThat;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

@RunWith(JUnit4.class)
public class TranslateFromFileTest {

  private ByteArrayOutputStream bout;
  // Original System.out, saved so it can be restored after the test; replacing it with null
  // would break any later code in the JVM that prints to stdout.
  private PrintStream originalOut;

  @Before
  public void setUp() {
    originalOut = System.out;
    bout = new ByteArrayOutputStream();
    System.setOut(new PrintStream(bout));
  }

  @After
  public void tearDown() {
    // Restore the real stdout rather than setting it to null.
    System.setOut(originalOut);
    bout.reset();
  }

  @Test
  public void testTranslateFromFile() throws IOException {
    // Call translateFromFile to print out the translated output.
    TranslateFromFile.translateFromFile("resources/audio.raw");
    String output = bout.toString();

    // Check that the output contains some translation.
    assertThat(output).contains("Partial translation");
  }
}

0 comments on commit 70b51e4

Please sign in to comment.