This repository was archived by the owner on Sep 16, 2023, and is now read-only.

Commit 32caca2
samples: update person detection to GA (#344)
* feat: update person detection to GA
* fix: missing dependency and changed sample package name
* fix: changed package name for tests
telpirion committed Dec 8, 2020
1 parent 7f583d9 commit 32caca2
Showing 17 changed files with 222 additions and 194 deletions.
5 changes: 5 additions & 0 deletions samples/install-without-bom/pom.xml
@@ -31,6 +31,11 @@
       <artifactId>google-cloud-video-intelligence</artifactId>
       <version>1.5.5</version>
     </dependency>
+    <dependency>
+      <groupId>com.google.cloud</groupId>
+      <artifactId>google-cloud-storage</artifactId>
+      <version>1.113.4</version>
+    </dependency>
     <!-- [END videointelligence_install_without_bom] -->
     <dependency>
       <groupId>com.google.cloud</groupId>
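The commit message calls out a missing dependency; the added google-cloud-storage artifact implies the samples or their tests call Cloud Storage directly. A minimal sketch of that kind of use, with a hypothetical bucket, object, and class name (illustrative only, not part of the commit):

import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import java.nio.file.Files;
import java.nio.file.Paths;

public class StageTestVideo {
  public static void main(String[] args) throws Exception {
    // Default credentials are picked up from the environment.
    Storage storage = StorageOptions.getDefaultInstance().getService();
    // Bucket and object names are placeholders.
    BlobId blobId = BlobId.of("my-test-bucket", "video/sample.mp4");
    BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType("video/mp4").build();
    // Upload a local video so the GCS-based samples have something to annotate.
    storage.create(blobInfo, Files.readAllBytes(Paths.get("resources/sample.mp4")));
    System.out.println("Uploaded gs://my-test-bucket/video/sample.mp4");
  }
}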

Large diffs are not rendered by default.

@@ -14,25 +14,25 @@
* limitations under the License.
*/

-package beta.video;
+package video;

-// [START video_detect_person_beta]
+// [START video_detect_person]

import com.google.api.gax.longrunning.OperationFuture;
-import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress;
-import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
-import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
-import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
-import com.google.cloud.videointelligence.v1p3beta1.DetectedLandmark;
-import com.google.cloud.videointelligence.v1p3beta1.Feature;
-import com.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation;
-import com.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig;
-import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
-import com.google.cloud.videointelligence.v1p3beta1.Track;
-import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
-import com.google.cloud.videointelligence.v1p3beta1.VideoContext;
-import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
-import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1.DetectedLandmark;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.PersonDetectionAnnotation;
+import com.google.cloud.videointelligence.v1.PersonDetectionConfig;
+import com.google.cloud.videointelligence.v1.TimestampedObject;
+import com.google.cloud.videointelligence.v1.Track;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoContext;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
import com.google.protobuf.ByteString;
import java.nio.file.Files;
import java.nio.file.Path;
@@ -118,4 +118,4 @@ public static void detectPerson(String localFilePath) throws Exception {
}
}
}
-// [END video_detect_person_beta]
+// [END video_detect_person]
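The body of this sample is collapsed above; based on the GA imports, the request construction presumably follows the usual pattern. A hedged sketch of that construction (the config flags, timeout, and helper name are assumptions, not taken from the diff; java.nio.file.Paths and java.util.concurrent.TimeUnit would also need importing):

public static void detectPersonSketch(String localFilePath) throws Exception {
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Read the local file into the request payload.
    byte[] data = Files.readAllBytes(Paths.get(localFilePath));

    // These setters exist on the v1 PersonDetectionConfig; which ones the
    // sample actually enables is an assumption.
    PersonDetectionConfig config =
        PersonDetectionConfig.newBuilder()
            .setIncludeBoundingBoxes(true)
            .setIncludePoseLandmarks(true)
            .setIncludeAttributes(true)
            .build();
    VideoContext context = VideoContext.newBuilder().setPersonDetectionConfig(config).build();

    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputContent(ByteString.copyFrom(data))
            .addFeatures(Feature.PERSON_DETECTION)
            .setVideoContext(context)
            .build();

    // annotateVideoAsync returns an OperationFuture; block until it resolves.
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
        client.annotateVideoAsync(request);
    AnnotateVideoResponse response = future.get(600, TimeUnit.SECONDS);
    System.out.println(
        "Annotations: " + response.getAnnotationResults(0).getPersonDetectionAnnotationsCount());
  }
}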
@@ -14,25 +14,25 @@
* limitations under the License.
*/

-package beta.video;
+package video;

-// [START video_detect_person_gcs_beta]
+// [START video_detect_person_gcs]

import com.google.api.gax.longrunning.OperationFuture;
-import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress;
-import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
-import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
-import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
-import com.google.cloud.videointelligence.v1p3beta1.DetectedLandmark;
-import com.google.cloud.videointelligence.v1p3beta1.Feature;
-import com.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation;
-import com.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig;
-import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
-import com.google.cloud.videointelligence.v1p3beta1.Track;
-import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
-import com.google.cloud.videointelligence.v1p3beta1.VideoContext;
-import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
-import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1.DetectedLandmark;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.PersonDetectionAnnotation;
+import com.google.cloud.videointelligence.v1.PersonDetectionConfig;
+import com.google.cloud.videointelligence.v1.TimestampedObject;
+import com.google.cloud.videointelligence.v1.Track;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoContext;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;

public class DetectPersonGcs {

@@ -110,4 +110,4 @@ public static void detectPersonGcs(String gcsUri) throws Exception {
}
}
}
-// [END video_detect_person_gcs_beta]
+// [END video_detect_person_gcs]
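On the results side, the v1 types imported above are traversed the same way as their beta counterparts. A sketch of walking the annotations (it assumes `response` is the completed AnnotateVideoResponse; this is not copied from the collapsed diff):

VideoAnnotationResults results = response.getAnnotationResults(0);
for (PersonDetectionAnnotation annotation : results.getPersonDetectionAnnotationsList()) {
  for (Track track : annotation.getTracksList()) {
    // Each track follows one person across a span of the video.
    VideoSegment segment = track.getSegment();
    System.out.printf(
        "Track: %.2fs to %.2fs%n",
        segment.getStartTimeOffset().getSeconds()
            + segment.getStartTimeOffset().getNanos() / 1e9,
        segment.getEndTimeOffset().getSeconds()
            + segment.getEndTimeOffset().getNanos() / 1e9);
    for (TimestampedObject object : track.getTimestampedObjectsList()) {
      // Attributes and pose landmarks ride along with each timestamped box.
      for (DetectedAttribute attribute : object.getAttributesList()) {
        System.out.printf("  attribute %s: %s%n", attribute.getName(), attribute.getValue());
      }
      for (DetectedLandmark landmark : object.getLandmarksList()) {
        System.out.printf(
            "  landmark %s at (%.2f, %.2f)%n",
            landmark.getName(), landmark.getPoint().getX(), landmark.getPoint().getY());
      }
    }
  }
}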
@@ -14,7 +14,7 @@
* limitations under the License.
*/

-package com.example.video;
+package video;

// [START video_detect_logo]

@@ -14,7 +14,7 @@
* limitations under the License.
*/

-package com.example.video;
+package video;

// [START video_detect_logo_gcs]

@@ -14,7 +14,7 @@
* limitations under the License.
*/

-package com.example.video;
+package video;

// [START video_quickstart]

@@ -32,20 +32,19 @@

public class QuickstartSample {

-  /**
-   * Demonstrates using the video intelligence client to detect labels in a video file.
-   */
+  /** Demonstrates using the video intelligence client to detect labels in a video file. */
public static void main(String[] args) throws Exception {
// Instantiate a video intelligence client
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// The Google Cloud Storage path to the video to annotate.
String gcsUri = "gs://cloud-samples-data/video/cat.mp4";

// Create an operation that will contain the response when the operation completes.
-      AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
-          .setInputUri(gcsUri)
-          .addFeatures(Feature.LABEL_DETECTION)
-          .build();
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.LABEL_DETECTION)
+              .build();

OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
client.annotateVideoAsync(request);
@@ -61,18 +60,20 @@ public static void main(String[] args) throws Exception {
System.out.println("Labels:");
// get video segment label annotations
for (LabelAnnotation annotation : result.getSegmentLabelAnnotationsList()) {
-          System.out
-              .println("Video label description : " + annotation.getEntity().getDescription());
+          System.out.println(
+              "Video label description : " + annotation.getEntity().getDescription());
// categories
for (Entity categoryEntity : annotation.getCategoryEntitiesList()) {
System.out.println("Label Category description : " + categoryEntity.getDescription());
}
// segments
for (LabelSegment segment : annotation.getSegmentsList()) {
-            double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
-                + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
-            double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
-                + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
System.out.printf("Segment location : %.3f:%.3f\n", startTime, endTime);
System.out.println("Confidence : " + segment.getConfidence());
}
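The collapsed lines between annotateVideoAsync(request) and the label loop presumably block on the returned OperationFuture. A sketch of that elided step (the 600-second timeout is an assumption; java.util.List and java.util.concurrent.TimeUnit would need importing):

// Wait for the long-running annotation operation to finish, then unpack
// the per-video results that the label loop above iterates over.
List<VideoAnnotationResults> results =
    response.get(600, TimeUnit.SECONDS).getAnnotationResultsList();
for (VideoAnnotationResults result : results) {
  // ... label printing as shown in the diff above ...
}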
@@ -14,7 +14,7 @@
* limitations under the License.
*/

-package com.example.video;
+package video;

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
@@ -51,10 +51,11 @@ public static VideoAnnotationResults detectText(String filePath) throws Exception {
byte[] data = Files.readAllBytes(path);

// Create the request
-      AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
-          .setInputContent(ByteString.copyFrom(data))
-          .addFeatures(Feature.TEXT_DETECTION)
-          .build();
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputContent(ByteString.copyFrom(data))
+              .addFeatures(Feature.TEXT_DETECTION)
+              .build();

// asynchronously perform object tracking on videos
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
@@ -77,25 +78,29 @@ public static VideoAnnotationResults detectText(String filePath) throws Exception {
Duration startTimeOffset = videoSegment.getStartTimeOffset();
Duration endTimeOffset = videoSegment.getEndTimeOffset();
// Display the offset times in seconds, 1e9 is part of the formula to convert nanos to seconds
System.out.println(String.format("Start time: %.2f",
startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
System.out.println(String.format("End time: %.2f",
endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
System.out.println(
String.format(
"Start time: %.2f", startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
System.out.println(
String.format(
"End time: %.2f", endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));

// Show the first result for the first frame in the segment.
TextFrame textFrame = textSegment.getFrames(0);
Duration timeOffset = textFrame.getTimeOffset();
System.out.println(String.format("Time offset for the first frame: %.2f",
timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
System.out.println(
String.format(
"Time offset for the first frame: %.2f",
timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

// Display the rotated bounding box for where the text is on the frame.
System.out.println("Rotated Bounding Box Vertices:");
List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
for (NormalizedVertex normalizedVertex : vertices) {
-        System.out.println(String.format(
-            "\tVertex.x: %.2f, Vertex.y: %.2f",
-            normalizedVertex.getX(),
-            normalizedVertex.getY()));
+        System.out.println(
+            String.format(
+                "\tVertex.x: %.2f, Vertex.y: %.2f",
+                normalizedVertex.getX(), normalizedVertex.getY()));
}
return results;
}
@@ -111,10 +116,11 @@ public static VideoAnnotationResults detectText(String filePath) throws Exception {
public static VideoAnnotationResults detectTextGcs(String gcsUri) throws Exception {
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Create the request
-      AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
-          .setInputUri(gcsUri)
-          .addFeatures(Feature.TEXT_DETECTION)
-          .build();
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.TEXT_DETECTION)
+              .build();

// asynchronously perform object tracking on videos
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
@@ -137,25 +143,29 @@ public static VideoAnnotationResults detectTextGcs(String gcsUri) throws Exception {
Duration startTimeOffset = videoSegment.getStartTimeOffset();
Duration endTimeOffset = videoSegment.getEndTimeOffset();
// Display the offset times in seconds, 1e9 is part of the formula to convert nanos to seconds
System.out.println(String.format("Start time: %.2f",
startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
System.out.println(String.format("End time: %.2f",
endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
System.out.println(
String.format(
"Start time: %.2f", startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
System.out.println(
String.format(
"End time: %.2f", endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));

// Show the first result for the first frame in the segment.
TextFrame textFrame = textSegment.getFrames(0);
Duration timeOffset = textFrame.getTimeOffset();
System.out.println(String.format("Time offset for the first frame: %.2f",
timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
System.out.println(
String.format(
"Time offset for the first frame: %.2f",
timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

// Display the rotated bounding box for where the text is on the frame.
System.out.println("Rotated Bounding Box Vertices:");
List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
for (NormalizedVertex normalizedVertex : vertices) {
-        System.out.println(String.format(
-            "\tVertex.x: %.2f, Vertex.y: %.2f",
-            normalizedVertex.getX(),
-            normalizedVertex.getY()));
+        System.out.println(
+            String.format(
+                "\tVertex.x: %.2f, Vertex.y: %.2f",
+                normalizedVertex.getX(), normalizedVertex.getY()));
}
return results;
}
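Both text-detection methods repeat the seconds-plus-nanos arithmetic that the inline comments describe. A tiny helper performing the same conversion, shown only to make the formula explicit (it is not part of the commit):

// Convert a protobuf Duration offset to fractional seconds.
// e.g. seconds=2, nanos=500_000_000 -> 2.5
static double toSeconds(com.google.protobuf.Duration offset) {
  return offset.getSeconds() + offset.getNanos() / 1e9;
}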
