diff --git a/google-cloud-bigtable/clirr-ignored-differences.xml b/google-cloud-bigtable/clirr-ignored-differences.xml
index ab921a973..9391d2ea8 100644
--- a/google-cloud-bigtable/clirr-ignored-differences.xml
+++ b/google-cloud-bigtable/clirr-ignored-differences.xml
@@ -23,4 +23,10 @@
    <differenceType>8001</differenceType>
    <className>com/google/cloud/bigtable/gaxx/tracing/WrappedTracerFactory*</className>
  </difference>
-</differences>
\ No newline at end of file
+
+  <difference>
+    <differenceType>7004</differenceType>
+    <className>com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub</className>
+    <method>*</method>
+  </difference>
+</differences>
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java
index 04e1b1598..ce9a57fa7 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java
@@ -23,6 +23,7 @@
import com.google.api.core.BetaApi;
import com.google.api.core.InternalApi;
import com.google.api.gax.batching.Batcher;
+import com.google.api.gax.grpc.GrpcCallContext;
import com.google.api.gax.rpc.ApiExceptions;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.ServerStream;
@@ -1073,7 +1074,40 @@ public void bulkMutateRows(BulkMutation mutation) {
*/
@BetaApi("This surface is likely to change as the batching surface evolves.")
public Batcher<RowMutationEntry, Void> newBulkMutationBatcher(@Nonnull String tableId) {
- return stub.newMutateRowsBatcher(tableId);
+ return newBulkMutationBatcher(tableId, null);
+ }
+
+ /**
+ * Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow,
+ * but the entire batch is not executed atomically. The returned Batcher instance is not
+ * threadsafe, it can only be used from single thread. This method allows customization of the
+ * underlying RPCs by passing in a {@link com.google.api.gax.grpc.GrpcCallContext}. The same
+ * context will be reused for all batches. This can be used to customize things like per attempt
+ * timeouts.
+ *
+ * <p>Sample Code:
+ *
+ * <pre>{@code
+ * try (BigtableDataClient bigtableDataClient = BigtableDataClient.create("[PROJECT]", "[INSTANCE]")) {
+ * try (Batcher<RowMutationEntry, Void> batcher = bigtableDataClient.newBulkMutationBatcher("[TABLE]", GrpcCallContext.createDefault().withTimeout(Duration.ofSeconds(10)))) {
+ * for (String someValue : someCollection) {
+ * ApiFuture<Void> entryFuture =
+ * batcher.add(
+ * RowMutationEntry.create("[ROW KEY]")
+ * .setCell("[FAMILY NAME]", "[QUALIFIER]", "[VALUE]"));
+ * }
+ *
+ * // Blocks until mutations are applied on all submitted row entries.
+ * batcher.flush();
+ * }
+ * // Before `batcher` is closed, all remaining (if any) mutations are applied.
+ * }
+ * }</pre>
+ */
+ @BetaApi("This surface is likely to change as the batching surface evolves.")
+ public Batcher<RowMutationEntry, Void> newBulkMutationBatcher(
+ @Nonnull String tableId, @Nullable GrpcCallContext ctx) {
+ return stub.newMutateRowsBatcher(tableId, ctx);
}
/**
@@ -1159,11 +1193,61 @@ public Batcher<ByteString, Row> newBulkReadRowsBatcher(String tableId) {
*/
public Batcher<ByteString, Row> newBulkReadRowsBatcher(
String tableId, @Nullable Filters.Filter filter) {
+ return newBulkReadRowsBatcher(tableId, filter, null);
+ }
+
+ /**
+ * Reads rows for given tableId and filter criteria in a batch. If the row does not exist, the
+ * value will be null. The returned Batcher instance is not threadsafe, it can only be used from a
+ * single thread. This method allows customization of the underlying RPCs by passing in a {@link
+ * com.google.api.gax.grpc.GrpcCallContext}. The same context will be reused for all batches. This
+ * can be used to customize things like per attempt timeouts.
+ *
+ * <p>Performance notice: The ReadRows protocol requires that rows are sent in ascending key
+ * order, which means that the keys are processed sequentially on the server-side, so batching
+ * allows improving throughput but not latency. Lower latencies can be achieved by sending smaller
+ * requests concurrently.
+ *
+ * <p>Sample Code:
+ *
+ * <pre>{@code
+ * try (BigtableDataClient bigtableDataClient = BigtableDataClient.create("[PROJECT]", "[INSTANCE]")) {
+ *
+ * // Build the filter expression
+ * Filter filter = FILTERS.chain()
+ * .filter(FILTERS.key().regex("prefix.*"))
+ * .filter(FILTERS.limit().cellsPerRow(10));
+ *
+ * List<ApiFuture<Row>> rows = new ArrayList<>();
+ *
+ * try (Batcher<ByteString, Row> batcher = bigtableDataClient.newBulkReadRowsBatcher(
+ * "[TABLE]", filter, GrpcCallContext.createDefault().withTimeout(Duration.ofSeconds(10)))) {
+ * for (String someValue : someCollection) {
+ * ApiFuture<Row> rowFuture =
+ * batcher.add(ByteString.copyFromUtf8("[ROW KEY]"));
+ * rows.add(rowFuture);
+ * }
+ *
+ * // [Optional] Sends collected elements for batching asynchronously.
+ * batcher.sendOutstanding();
+ *
+ * // [Optional] Invokes sendOutstanding() and awaits until all pending entries are resolved.
+ * batcher.flush();
+ * }
+ * // batcher.close() invokes `flush()` which will in turn invoke `sendOutstanding()` and
+ * // await pending batches until they are resolved.
+ *
+ * List<Row> actualRows = ApiFutures.allAsList(rows).get();
+ * }
+ * }</pre>