This repository has been archived by the owner on Dec 15, 2023. It is now read-only.

chore: update common templates, regenerate tests
yoshi-automation authored and chingor13 committed Nov 19, 2019
1 parent a9c3c30 commit c0e747e
Showing 24 changed files with 139 additions and 34 deletions.
1 change: 1 addition & 0 deletions .github/release-please.yml
@@ -1 +1,2 @@
releaseType: java-yoshi
bumpMinorPreMajor: true
14 changes: 10 additions & 4 deletions .kokoro/build.sh
@@ -27,6 +27,7 @@ echo ${JOB_TYPE}
mvn install -B -V \
-DskipTests=true \
-Dclirr.skip=true \
-Denforcer.skip=true \
-Dmaven.javadoc.skip=true \
-Dgcloud.download.skip=true \
-T 1C
@@ -38,7 +39,7 @@ fi

case ${JOB_TYPE} in
test)
mvn test -B -Dclirr.skip=true
mvn test -B -Dclirr.skip=true -Denforcer.skip=true
bash ${KOKORO_GFILE_DIR}/codecov.sh
bash .kokoro/coerce_logs.sh
;;
@@ -49,12 +50,17 @@ javadoc)
mvn javadoc:javadoc javadoc:test-javadoc
;;
integration)
mvn -B ${INTEGRATION_TEST_ARGS} -DtrimStackTrace=false -Dclirr.skip=true -fae verify
mvn -B ${INTEGRATION_TEST_ARGS} \
-DtrimStackTrace=false \
-Dclirr.skip=true \
-Denforcer.skip=true \
-fae \
verify
bash .kokoro/coerce_logs.sh
;;
clirr)
mvn -B clirr:check
mvn -B -Denforcer.skip=true clirr:check
;;
*)
;;
esac
esac
6 changes: 5 additions & 1 deletion .kokoro/dependencies.sh
@@ -23,5 +23,9 @@ echo $JOB_TYPE

export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=128m"

mvn install -DskipTests=true -B -V
# this should run maven enforcer
mvn install -B -V \
-DskipTests=true \
-Dclirr.skip=true

mvn -B dependency:analyze -DfailOnWarning=true
@@ -34,6 +34,7 @@
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import org.junit.After;
import org.junit.AfterClass;
@@ -53,7 +54,8 @@ public class SpeechClientTest {
public static void startStaticServer() {
mockSpeech = new MockSpeech();
serviceHelper =
new MockServiceHelper("in-process-1", Arrays.<MockGrpcService>asList(mockSpeech));
new MockServiceHelper(
UUID.randomUUID().toString(), Arrays.<MockGrpcService>asList(mockSpeech));
serviceHelper.start();
}

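The test hunk above replaces the fixed in-process server name "in-process-1" with UUID.randomUUID().toString(). A minimal standalone sketch of the same idea (not part of this commit; the class name is made up for illustration):

import java.util.UUID;

public class UniqueServerNameSketch {
  public static void main(String[] args) {
    // A fixed literal such as "in-process-1" can collide when several test
    // suites register in-process gRPC servers in the same JVM; a random
    // UUID gives each MockServiceHelper its own name.
    String serverName = UUID.randomUUID().toString();
    System.out.println("in-process server name: " + serverName);
  }
}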
@@ -34,6 +34,7 @@
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import org.junit.After;
import org.junit.AfterClass;
@@ -53,7 +54,8 @@ public class SpeechClientTest {
public static void startStaticServer() {
mockSpeech = new MockSpeech();
serviceHelper =
new MockServiceHelper("in-process-1", Arrays.<MockGrpcService>asList(mockSpeech));
new MockServiceHelper(
UUID.randomUUID().toString(), Arrays.<MockGrpcService>asList(mockSpeech));
serviceHelper.start();
}

@@ -59,14 +59,19 @@ public class SpeechTranscribeAsync {
* import java.nio.file.Paths;
*/

public static void sampleLongRunningRecognize() {
// TODO(developer): Replace these variables before running the sample.
String localFilePath = "resources/brooklyn_bridge.raw";
sampleLongRunningRecognize(localFilePath);
}

/**
* Transcribe a long audio file using asynchronous speech recognition
*
* @param localFilePath Path to local audio file, e.g. /path/audio.wav
*/
public static void sampleLongRunningRecognize(String localFilePath) {
try (SpeechClient speechClient = SpeechClient.create()) {
// localFilePath = "resources/brooklyn_bridge.raw";

// The language of the supplied audio
String languageCode = "en-US";
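This sample hunk, and the ones that follow, adds a no-argument overload that supplies a TODO-marked default value and delegates to the parameterized method, replacing the commented-out defaults inside the try block. A hypothetical caller of the overloads shown above (not part of this commit; it assumes SpeechTranscribeAsync is on the classpath) could look like:

public class SampleRunnerSketch {
  public static void main(String[] args) {
    if (args.length == 0) {
      // No argument: use the sample's built-in default local file path.
      SpeechTranscribeAsync.sampleLongRunningRecognize();
    } else {
      // Otherwise pass the caller-supplied audio path straight through.
      SpeechTranscribeAsync.sampleLongRunningRecognize(args[0]);
    }
  }
}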
@@ -51,14 +51,19 @@ public class SpeechTranscribeAsyncGcs {
* import com.google.cloud.speech.v1.SpeechRecognitionResult;
*/

public static void sampleLongRunningRecognize() {
// TODO(developer): Replace these variables before running the sample.
String storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.raw";
sampleLongRunningRecognize(storageUri);
}

/**
* Transcribe long audio file from Cloud Storage using asynchronous speech recognition
*
* @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
*/
public static void sampleLongRunningRecognize(String storageUri) {
try (SpeechClient speechClient = SpeechClient.create()) {
// storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.raw";

// Sample rate in Hertz of the audio data sent
int sampleRateHertz = 16000;
@@ -53,14 +53,19 @@ public class SpeechTranscribeAsyncWordTimeOffsetsGcs {
* import com.google.cloud.speech.v1.WordInfo;
*/

public static void sampleLongRunningRecognize() {
// TODO(developer): Replace these variables before running the sample.
String storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.flac";
sampleLongRunningRecognize(storageUri);
}

/**
* Print start and end time of each word spoken in audio file from Cloud Storage
*
* @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
*/
public static void sampleLongRunningRecognize(String storageUri) {
try (SpeechClient speechClient = SpeechClient.create()) {
// storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.flac";

// When enabled, the first result returned by the API will include a list
// of words and the start and end time offsets (timestamps) for those words.
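The hunk above enables word-level start and end time offsets. A minimal sketch of the corresponding RecognitionConfig flag (an assumption about the v1 client library builder, not code from this commit):

import com.google.cloud.speech.v1.RecognitionConfig;

public class WordTimeOffsetsConfigSketch {
  public static void main(String[] args) {
    // Ask the recognizer to return start/end timestamps for each word.
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEnableWordTimeOffsets(true)
            .setLanguageCode("en-US")
            .build();
    System.out.println(config);
  }
}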
@@ -55,14 +55,19 @@ public class SpeechTranscribeEnhancedModel {
* import java.nio.file.Paths;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String localFilePath = "resources/hello.wav";
sampleRecognize(localFilePath);
}

/**
* Transcribe a short audio file using an enhanced model
*
* @param localFilePath Path to local audio file, e.g. /path/audio.wav
*/
public static void sampleRecognize(String localFilePath) {
try (SpeechClient speechClient = SpeechClient.create()) {
// localFilePath = "resources/hello.wav";

// The enhanced model to use, e.g. phone_call
// Currently phone_call is the only model available as an enhanced model.
@@ -55,6 +55,13 @@ public class SpeechTranscribeModelSelection {
* import java.nio.file.Paths;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String localFilePath = "resources/hello.wav";
String model = "phone_call";
sampleRecognize(localFilePath, model);
}

/**
* Transcribe a short audio file using a specified transcription model
*
@@ -65,8 +72,6 @@ public class SpeechTranscribeModelSelection {
*/
public static void sampleRecognize(String localFilePath, String model) {
try (SpeechClient speechClient = SpeechClient.create()) {
// localFilePath = "resources/hello.wav";
// model = "phone_call";

// The language of the supplied audio
String languageCode = "en-US";
@@ -47,6 +47,13 @@ public class SpeechTranscribeModelSelectionGcs {
* import com.google.cloud.speech.v1.SpeechRecognitionResult;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String storageUri = "gs://cloud-samples-data/speech/hello.wav";
String model = "phone_call";
sampleRecognize(storageUri, model);
}

/**
* Transcribe a short audio file from Cloud Storage using a specified transcription model
*
@@ -57,8 +64,6 @@ public class SpeechTranscribeModelSelectionGcs {
*/
public static void sampleRecognize(String storageUri, String model) {
try (SpeechClient speechClient = SpeechClient.create()) {
// storageUri = "gs://cloud-samples-data/speech/hello.wav";
// model = "phone_call";

// The language of the supplied audio
String languageCode = "en-US";
@@ -55,14 +55,19 @@ public class SpeechTranscribeMultichannel {
* import java.nio.file.Paths;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String localFilePath = "resources/multi.wav";
sampleRecognize(localFilePath);
}

/**
* Transcribe a short audio file with multiple channels
*
* @param localFilePath Path to local audio file, e.g. /path/audio.wav
*/
public static void sampleRecognize(String localFilePath) {
try (SpeechClient speechClient = SpeechClient.create()) {
// localFilePath = "resources/multi.wav";

// The number of channels in the input audio file (optional)
int audioChannelCount = 2;
@@ -47,14 +47,19 @@ public class SpeechTranscribeMultichannelGcs {
* import com.google.cloud.speech.v1.SpeechRecognitionResult;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String storageUri = "gs://cloud-samples-data/speech/multi.wav";
sampleRecognize(storageUri);
}

/**
* Transcribe a short audio file from Cloud Storage with multiple channels
*
* @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
*/
public static void sampleRecognize(String storageUri) {
try (SpeechClient speechClient = SpeechClient.create()) {
// storageUri = "gs://cloud-samples-data/speech/multi.wav";

// The number of channels in the input audio file (optional)
int audioChannelCount = 2;
@@ -55,14 +55,19 @@ public class SpeechTranscribeSync {
* import java.nio.file.Paths;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String localFilePath = "resources/brooklyn_bridge.raw";
sampleRecognize(localFilePath);
}

/**
* Transcribe a short audio file using synchronous speech recognition
*
* @param localFilePath Path to local audio file, e.g. /path/audio.wav
*/
public static void sampleRecognize(String localFilePath) {
try (SpeechClient speechClient = SpeechClient.create()) {
// localFilePath = "resources/brooklyn_bridge.raw";

// The language of the supplied audio
String languageCode = "en-US";
@@ -47,14 +47,19 @@ public class SpeechTranscribeSyncGcs {
* import com.google.cloud.speech.v1.SpeechRecognitionResult;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.raw";
sampleRecognize(storageUri);
}

/**
* Transcribe short audio file from Cloud Storage using synchronous speech recognition
*
* @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
*/
public static void sampleRecognize(String storageUri) {
try (SpeechClient speechClient = SpeechClient.create()) {
// storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.raw";

// Sample rate in Hertz of the audio data sent
int sampleRateHertz = 16000;
@@ -53,6 +53,13 @@ public class SpeechAdaptationBeta {
* import java.util.List;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3";
String phrase = "Brooklyn Bridge";
sampleRecognize(storageUri, phrase);
}

/**
* Transcribe a short audio file with speech adaptation.
*
@@ -61,8 +68,6 @@ public class SpeechAdaptationBeta {
*/
public static void sampleRecognize(String storageUri, String phrase) {
try (SpeechClient speechClient = SpeechClient.create()) {
// storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3";
// phrase = "Brooklyn Bridge";
List<String> phrases = Arrays.asList(phrase);

// Hint Boost. This value increases the probability that a specific
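The adaptation hunk above feeds a single phrase plus a hint boost value into the request. A minimal sketch of building such a SpeechContext (an assumption about the v1p1beta1 builder API, not code from this commit):

import com.google.cloud.speech.v1p1beta1.SpeechContext;
import java.util.Arrays;
import java.util.List;

public class SpeechContextBoostSketch {
  public static void main(String[] args) {
    // Phrases the recognizer should favor, plus a boost weight that raises
    // the probability of recognizing exactly these phrases.
    List<String> phrases = Arrays.asList("Brooklyn Bridge");
    float boost = 20.0F;
    SpeechContext speechContext =
        SpeechContext.newBuilder().addAllPhrases(phrases).setBoost(boost).build();
    System.out.println(speechContext);
  }
}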
@@ -53,6 +53,13 @@ public class SpeechContextsClassesBeta {
* import java.util.List;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String storageUri = "gs://cloud-samples-data/speech/time.mp3";
String phrase = "$TIME";
sampleRecognize(storageUri, phrase);
}

/**
* Transcribe a short audio file with static context classes.
*
@@ -63,8 +70,6 @@ public class SpeechContextsClassesBeta {
*/
public static void sampleRecognize(String storageUri, String phrase) {
try (SpeechClient speechClient = SpeechClient.create()) {
// storageUri = "gs://cloud-samples-data/speech/time.mp3";
// phrase = "$TIME";
List<String> phrases = Arrays.asList(phrase);
SpeechContext speechContextsElement =
SpeechContext.newBuilder().addAllPhrases(phrases).build();
@@ -47,14 +47,19 @@ public class SpeechQuickstartBeta {
* import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3";
sampleRecognize(storageUri);
}

/**
* Performs synchronous speech recognition on an audio file
*
* @param storageUri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
*/
public static void sampleRecognize(String storageUri) {
try (SpeechClient speechClient = SpeechClient.create()) {
// storageUri = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3";

// The language of the supplied audio
String languageCode = "en-US";
Expand Down
@@ -55,14 +55,19 @@ public class SpeechTranscribeAutoPunctuationBeta {
* import java.nio.file.Paths;
*/

public static void sampleRecognize() {
// TODO(developer): Replace these variables before running the sample.
String localFilePath = "resources/commercial_mono.wav";
sampleRecognize(localFilePath);
}

/**
* Transcribe a short audio file with punctuation
*
* @param localFilePath Path to local audio file, e.g. /path/audio.wav
*/
public static void sampleRecognize(String localFilePath) {
try (SpeechClient speechClient = SpeechClient.create()) {
// localFilePath = "resources/commercial_mono.wav";

// When enabled, transcription results may include punctuation
// (available for select languages).
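The last hunk enables automatic punctuation in the beta API. A minimal configuration sketch (an assumption about the v1p1beta1 RecognitionConfig builder, not code from this commit):

import com.google.cloud.speech.v1p1beta1.RecognitionConfig;

public class AutoPunctuationConfigSketch {
  public static void main(String[] args) {
    // When enabled, transcription results may include punctuation
    // (available for select languages).
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEnableAutomaticPunctuation(true)
            .setLanguageCode("en-US")
            .build();
    System.out.println(config);
  }
}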
