Skip to content

Commit

Permalink
chore: make BulkWriter package-private (#330)
Browse files Browse the repository at this point in the history
Co-authored-by: BenWhitehead <BenWhitehead@users.noreply.github.com>
  • Loading branch information
Brian Chen and BenWhitehead committed Aug 14, 2020
1 parent a2d27df commit ef0869a
Show file tree
Hide file tree
Showing 5 changed files with 4 additions and 120 deletions.
Expand Up @@ -42,7 +42,7 @@
import javax.annotation.Nonnull;
import javax.annotation.Nullable;

public final class BulkWriter implements AutoCloseable {
final class BulkWriter implements AutoCloseable {
/** The maximum number of writes that can be in a single batch. */
public static final int MAX_BATCH_SIZE = 500;

Expand Down
Expand Up @@ -19,7 +19,7 @@
import javax.annotation.Nonnull;

/** Options used to disable request throttling in BulkWriter. */
public final class BulkWriterOptions {
final class BulkWriterOptions {

private final boolean enableThrottling;

Expand Down
Expand Up @@ -168,12 +168,6 @@ void getAll(
@Nonnull
WriteBatch batch();

@Nonnull
BulkWriter bulkWriter();

@Nonnull
BulkWriter bulkWriter(BulkWriterOptions options);

/**
* Closes the gRPC channels associated with this instance and frees up their resources. This
* method blocks until all channels are closed. Once this method is called, this Firestore client
Expand Down
Expand Up @@ -93,14 +93,12 @@ public WriteBatch batch() {
}

@Nonnull
@Override
public BulkWriter bulkWriter() {
BulkWriter bulkWriter() {
return new BulkWriter(this, /* enableThrottling= */ true);
}

@Nonnull
@Override
public BulkWriter bulkWriter(BulkWriterOptions options) {
BulkWriter bulkWriter(BulkWriterOptions options) {
return new BulkWriter(this, options.isThrottlingEnabled());
}

Expand Down
Expand Up @@ -34,7 +34,6 @@
import com.google.api.core.SettableApiFuture;
import com.google.api.gax.rpc.ApiStreamObserver;
import com.google.cloud.Timestamp;
import com.google.cloud.firestore.BulkWriter;
import com.google.cloud.firestore.CollectionReference;
import com.google.cloud.firestore.DocumentReference;
import com.google.cloud.firestore.DocumentSnapshot;
Expand Down Expand Up @@ -1316,113 +1315,6 @@ public void deleteNestedFieldUsingFieldPath() throws Exception {
assertNull(documentSnapshots.getData().get("c.d"));
}

@Test
public void bulkWriterCreate() throws Exception {
  // A create() enqueued on a BulkWriter should be committed by close() and
  // the written data should be readable afterwards.
  DocumentReference doc = randomColl.document();

  BulkWriter bulkWriter = firestore.bulkWriter();
  ApiFuture<WriteResult> writeResult =
      bulkWriter.create(doc, Collections.singletonMap("foo", (Object) "bar"));
  bulkWriter.close();

  assertNotNull(writeResult.get().getUpdateTime());
  DocumentSnapshot snapshot = doc.get().get();
  assertEquals("bar", snapshot.get("foo"));
}

@Test
public void bulkWriterCreateAddsPrecondition() throws Exception {
  // create() must fail when the target document already exists, proving that
  // BulkWriter attaches an exists:false precondition to creates.
  DocumentReference doc = randomColl.document();
  doc.set(Collections.singletonMap("foo", (Object) "bar")).get();

  BulkWriter bulkWriter = firestore.bulkWriter();
  ApiFuture<WriteResult> writeResult =
      bulkWriter.create(doc, Collections.singletonMap("foo", (Object) "bar"));
  bulkWriter.close();

  try {
    writeResult.get();
    fail("Create operation should have thrown exception");
  } catch (Exception e) {
    assertTrue(e.getMessage().contains("Document already exists"));
  }
}

@Test
public void bulkWriterSet() throws Exception {
  // A set() enqueued on a BulkWriter should be committed by close() and the
  // resulting document contents should match what was written.
  DocumentReference doc = randomColl.document();

  BulkWriter bulkWriter = firestore.bulkWriter();
  ApiFuture<WriteResult> writeResult =
      bulkWriter.set(doc, Collections.singletonMap("foo", (Object) "bar"));
  bulkWriter.close();

  assertNotNull(writeResult.get().getUpdateTime());
  DocumentSnapshot snapshot = doc.get().get();
  assertEquals("bar", snapshot.get("foo"));
}

@Test
public void bulkWriterUpdate() throws Exception {
  // update() via BulkWriter should overwrite an existing field value.
  DocumentReference doc = randomColl.document();
  doc.set(Collections.singletonMap("foo", "oldValue")).get();

  BulkWriter bulkWriter = firestore.bulkWriter();
  ApiFuture<WriteResult> writeResult = bulkWriter.update(doc, "foo", "newValue");
  bulkWriter.close();

  assertNotNull(writeResult.get().getUpdateTime());
  DocumentSnapshot snapshot = doc.get().get();
  assertEquals("newValue", snapshot.get("foo"));
}

@Test
public void bulkWriterUpdateAddsPrecondition() throws Exception {
  // update() must fail when the target document does not exist, proving that
  // BulkWriter attaches an exists:true precondition to updates.
  DocumentReference doc = randomColl.document();

  BulkWriter bulkWriter = firestore.bulkWriter();
  ApiFuture<WriteResult> writeResult = bulkWriter.update(doc, "foo", "newValue");
  bulkWriter.close();

  try {
    writeResult.get();
    fail("Update operation should have thrown exception");
  } catch (Exception e) {
    assertTrue(e.getMessage().contains("No document to update"));
  }
}

@Test
public void bulkWriterDelete() throws Exception {
  // delete() via BulkWriter should remove the document's data.
  DocumentReference doc = randomColl.document();
  doc.set(Collections.singletonMap("foo", "oldValue")).get();

  BulkWriter bulkWriter = firestore.bulkWriter();
  ApiFuture<WriteResult> writeResult = bulkWriter.delete(doc);
  bulkWriter.close();

  assertNotNull(writeResult.get().getUpdateTime());
  // TODO(b/158502664): Remove this check once we can get write times.
  assertEquals(Timestamp.ofTimeSecondsAndNanos(0, 0), writeResult.get().getUpdateTime());
  DocumentSnapshot snapshot = doc.get().get();
  assertNull(snapshot.get("foo"));
}

@Test
public void bulkWriterWritesInOrder() throws Exception {
  // Multiple sets on the same document must be applied in enqueue order, so
  // the last write wins.
  DocumentReference doc = randomColl.document();
  doc.set(Collections.singletonMap("foo", "oldValue")).get();

  BulkWriter bulkWriter = firestore.bulkWriter();
  bulkWriter.set(doc, Collections.singletonMap("foo", (Object) "bar1"));
  bulkWriter.set(doc, Collections.singletonMap("foo", (Object) "bar2"));
  bulkWriter.set(doc, Collections.singletonMap("foo", (Object) "bar3"));
  bulkWriter.close();

  ApiFuture<DocumentSnapshot> snapshotFuture = doc.get();
  assertEquals(Collections.singletonMap("foo", "bar3"), snapshotFuture.get().getData());
}

@Test
public void readOnlyTransaction_successfulGet()
throws ExecutionException, InterruptedException, TimeoutException {
Expand Down

0 comments on commit ef0869a

Please sign in to comment.