Skip to content

Commit

Permalink
feat: add recursiveDelete() to Firestore (#622) (#649)
Browse files Browse the repository at this point in the history
  • Loading branch information
Brian Chen committed May 25, 2021
1 parent 8ad6c8e commit 9ff2f41
Show file tree
Hide file tree
Showing 11 changed files with 1,420 additions and 26 deletions.
6 changes: 6 additions & 0 deletions google-cloud-firestore/clirr-ignored-differences.xml
Expand Up @@ -252,4 +252,10 @@
<to>*</to>
</difference>

<!-- Recursive Delete -->
<difference>
<differenceType>7012</differenceType>
<className>com/google/cloud/firestore/Firestore</className>
<method>com.google.api.core.ApiFuture recursiveDelete(*)</method>
</difference>
</differences>
Expand Up @@ -805,7 +805,17 @@ public void close() throws InterruptedException, ExecutionException {
flushFuture.get();
}

private void verifyNotClosedLocked() {
/**
 * Verifies that this BulkWriter instance has not been closed, acquiring the internal {@code lock}
 * first. Intended for callers outside this class that do not already hold the lock; code that
 * already holds {@code lock} should call {@code verifyNotClosedLocked()} directly instead.
 *
 * @throws IllegalStateException if the BulkWriter has already been closed.
 */
void verifyNotClosed() {
synchronized (lock) {
verifyNotClosedLocked();
}
}

void verifyNotClosedLocked() {
if (this.closed) {
throw new IllegalStateException("BulkWriter has already been closed.");
}
Expand Down
Expand Up @@ -193,6 +193,84 @@ void getAll(
@Nonnull
BulkWriter bulkWriter(BulkWriterOptions options);

/**
 * Recursively deletes all documents and subcollections at and under the specified level.
 *
 * <p>If any delete fails, the returned ApiFuture fails with an error whose message contains the
 * number of failed deletes and the stack trace of the last failed delete. The provided reference
 * is deleted regardless of whether all other deletes succeeded.
 *
 * <p>This overload uses a {@link BulkWriter} instance with default settings to perform the
 * deletes. To customize throttling rates or add success/error callbacks, use the overload that
 * accepts a custom BulkWriter instance.
 *
 * @param reference The reference of the collection to delete.
 * @return An ApiFuture that completes when all deletes have been performed. The future fails with
 *     an error if any of the deletes fail.
 */
@BetaApi
@Nonnull
ApiFuture<Void> recursiveDelete(CollectionReference reference);

/**
 * Recursively deletes all documents and subcollections at and under the specified level.
 *
 * <p>If any delete fails, the returned ApiFuture fails with an error whose message contains the
 * number of failed deletes and the stack trace of the last failed delete. The provided reference
 * is deleted regardless of whether all other deletes succeeded.
 *
 * <p>The supplied {@link BulkWriter} is used to perform the deletes, so its configured throttling
 * rates and success/error callbacks apply to this operation.
 *
 * @param reference The reference of the collection to delete.
 * @param bulkWriter A custom BulkWriter instance used to perform the deletes.
 * @return An ApiFuture that completes when all deletes have been performed. The future fails with
 *     an error if any of the deletes fail.
 */
@BetaApi
@Nonnull
ApiFuture<Void> recursiveDelete(CollectionReference reference, BulkWriter bulkWriter);

/**
 * Recursively deletes all documents and subcollections at and under the specified level.
 *
 * <p>If any delete fails, the returned ApiFuture fails with an error whose message contains the
 * number of failed deletes and the stack trace of the last failed delete. The provided reference
 * is deleted regardless of whether all other deletes succeeded.
 *
 * <p>This overload uses a {@link BulkWriter} instance with default settings to perform the
 * deletes. To customize throttling rates or add success/error callbacks, use the overload that
 * accepts a custom BulkWriter instance.
 *
 * @param reference The reference of the document to delete.
 * @return An ApiFuture that completes when all deletes have been performed. The future fails with
 *     an error if any of the deletes fail.
 */
@BetaApi
@Nonnull
ApiFuture<Void> recursiveDelete(DocumentReference reference);

/**
 * Recursively deletes all documents and subcollections at and under the specified level.
 *
 * <p>If any delete fails, the returned ApiFuture fails with an error whose message contains the
 * number of failed deletes and the stack trace of the last failed delete. The provided reference
 * is deleted regardless of whether all other deletes succeeded.
 *
 * <p>The supplied {@link BulkWriter} is used to perform the deletes, so its configured throttling
 * rates and success/error callbacks apply to this operation.
 *
 * @param reference The reference of the document to delete.
 * @param bulkWriter A custom BulkWriter instance used to perform the deletes.
 * @return An ApiFuture that completes when all deletes have been performed. The future fails with
 *     an error if any of the deletes fail.
 */
@BetaApi
@Nonnull
ApiFuture<Void> recursiveDelete(DocumentReference reference, BulkWriter bulkWriter);

/**
* Returns a FirestoreBundle.Builder {@link FirestoreBundle.Builder} instance using an
* automatically generated bundle ID. When loaded on clients, client SDKs use the bundle ID and
Expand Down
Expand Up @@ -24,6 +24,7 @@
import com.google.api.gax.rpc.UnaryCallable;
import com.google.cloud.Timestamp;
import com.google.cloud.firestore.spi.v1.FirestoreRpc;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.firestore.v1.BatchGetDocumentsRequest;
Expand Down Expand Up @@ -59,6 +60,12 @@ class FirestoreImpl implements Firestore, FirestoreRpcContext<FirestoreImpl> {
private final FirestoreOptions firestoreOptions;
private final ResourcePath databasePath;

/**
* A lazy-loaded BulkWriter instance to be used with recursiveDelete() if no BulkWriter instance
* is provided.
*/
@Nullable private BulkWriter bulkWriterInstance;

private boolean closed;

FirestoreImpl(FirestoreOptions options) {
Expand All @@ -76,6 +83,14 @@ class FirestoreImpl implements Firestore, FirestoreRpcContext<FirestoreImpl> {
ResourcePath.create(DatabaseRootName.of(options.getProjectId(), options.getDatabaseId()));
}

/**
 * Lazily creates and returns this Firestore instance's default {@link BulkWriter}, used by
 * recursiveDelete() when the caller does not supply one.
 *
 * <p>Declared {@code synchronized} so concurrent callers cannot race the null check and create
 * (and leak) more than one default BulkWriter instance.
 */
private synchronized BulkWriter getBulkWriter() {
  if (bulkWriterInstance == null) {
    bulkWriterInstance = bulkWriter();
  }
  return bulkWriterInstance;
}

/** Creates a pseudo-random 20-character ID that can be used for Firestore documents. */
static String autoId() {
StringBuilder builder = new StringBuilder();
Expand All @@ -102,6 +117,47 @@ public BulkWriter bulkWriter(BulkWriterOptions options) {
return new BulkWriter(this, options);
}

/**
 * Recursively deletes all documents and subcollections at and under the given collection, using
 * this Firestore instance's lazily created default BulkWriter.
 *
 * @param reference The reference of the collection to delete.
 * @return An ApiFuture that completes when all deletes have been performed.
 */
@Nonnull
public ApiFuture<Void> recursiveDelete(CollectionReference reference) {
  return recursiveDelete(reference.getResourcePath(), getBulkWriter());
}

/**
 * Recursively deletes all documents and subcollections at and under the given collection, using
 * the provided BulkWriter to perform the deletes.
 *
 * @param reference The reference of the collection to delete.
 * @param bulkWriter A custom BulkWriter instance used to perform the deletes.
 * @return An ApiFuture that completes when all deletes have been performed.
 */
// @Nonnull added to the bulkWriter parameter for consistency with the
// recursiveDelete(DocumentReference, BulkWriter) overload below.
@Nonnull
public ApiFuture<Void> recursiveDelete(
    CollectionReference reference, @Nonnull BulkWriter bulkWriter) {
  return recursiveDelete(reference.getResourcePath(), bulkWriter);
}

/**
 * Recursively deletes all documents and subcollections at and under the given document, using
 * this Firestore instance's lazily created default BulkWriter.
 *
 * @param reference The reference of the document to delete.
 * @return An ApiFuture that completes when all deletes have been performed.
 */
@Nonnull
public ApiFuture<Void> recursiveDelete(DocumentReference reference) {
  return recursiveDelete(reference.getResourcePath(), getBulkWriter());
}

/**
 * Recursively deletes all documents and subcollections at and under the given document, using the
 * provided BulkWriter to perform the deletes.
 *
 * @param reference The reference of the document to delete.
 * @param bulkWriter A custom BulkWriter instance used to perform the deletes.
 * @return An ApiFuture that completes when all deletes have been performed.
 */
@Nonnull
public ApiFuture<Void> recursiveDelete(
DocumentReference reference, @Nonnull BulkWriter bulkWriter) {
return recursiveDelete(reference.getResourcePath(), bulkWriter);
}

/**
 * Recursively deletes all documents and subcollections at and under {@code path}, delegating to
 * the testing overload with RecursiveDelete's default pending-operation limits.
 *
 * @param path The resource path of the collection or document to delete.
 * @param bulkWriter The BulkWriter instance used to perform the deletes.
 * @return An ApiFuture that completes when all deletes have been performed.
 */
@Nonnull
public ApiFuture<Void> recursiveDelete(ResourcePath path, BulkWriter bulkWriter) {
return recursiveDelete(
path, bulkWriter, RecursiveDelete.MAX_PENDING_OPS, RecursiveDelete.MIN_PENDING_OPS);
}

/**
 * Recursively deletes all documents and subcollections at and under {@code path}, with custom
 * pending-operation limits.
 *
 * <p>This overload is not private so that tests can exercise query resumption with startAfter()
 * once the RecursiveDelete instance has MAX_PENDING_OPS pending.
 *
 * @param path The resource path of the collection or document to delete.
 * @param bulkWriter The BulkWriter instance used to perform the deletes.
 * @param maxLimit Maximum number of pending operations before the query is paused.
 * @param minLimit Pending-operation count at which the query is resumed.
 * @return An ApiFuture that completes when all deletes have been performed.
 */
@Nonnull
@VisibleForTesting
ApiFuture<Void> recursiveDelete(
    ResourcePath path, @Nonnull BulkWriter bulkWriter, int maxLimit, int minLimit) {
  return new RecursiveDelete(this, bulkWriter, path, maxLimit, minLimit).run();
}

@Nonnull
@Override
public CollectionReference collection(@Nonnull String collectionPath) {
Expand Down
Expand Up @@ -255,13 +255,24 @@ abstract static class QueryOptions {

abstract ImmutableList<FieldReference> getFieldProjections();

// Whether the query selects all documents under `parentPath`, ignoring collection IDs.
// By default, only collections that match `collectionId` are selected.
abstract boolean isKindless();

// Whether restarting the query must return documents consistent with the original run.
// By default, restarting the query uses the readTime offset of the original query to
// provide consistent results.
abstract boolean getRequireConsistency();

static Builder builder() {
return new AutoValue_Query_QueryOptions.Builder()
.setAllDescendants(false)
.setLimitType(LimitType.First)
.setFieldOrders(ImmutableList.<FieldOrder>of())
.setFieldFilters(ImmutableList.<FieldFilter>of())
.setFieldProjections(ImmutableList.<FieldReference>of());
.setFieldProjections(ImmutableList.<FieldReference>of())
.setKindless(false)
.setRequireConsistency(true);
}

abstract Builder toBuilder();
Expand Down Expand Up @@ -290,6 +301,10 @@ abstract static class Builder {

abstract Builder setFieldProjections(ImmutableList<FieldReference> value);

// Sets whether the query selects all descendant documents regardless of collection ID.
abstract Builder setKindless(boolean value);

// Sets whether query restarts must return results consistent with the original readTime.
abstract Builder setRequireConsistency(boolean value);

abstract QueryOptions build();
}
}
Expand Down Expand Up @@ -327,21 +342,21 @@ private static boolean isUnaryComparison(@Nullable Object value) {
/** Computes the backend ordering semantics for DocumentSnapshot cursors. */
private ImmutableList<FieldOrder> createImplicitOrderBy() {
List<FieldOrder> implicitOrders = new ArrayList<>(options.getFieldOrders());
boolean hasDocumentId = false;

// If no explicit ordering is specified, use the first inequality to define an implicit order.
if (implicitOrders.isEmpty()) {
// If no explicit ordering is specified, use the first inequality to define an implicit order.
for (FieldFilter fieldFilter : options.getFieldFilters()) {
if (fieldFilter.isInequalityFilter()) {
implicitOrders.add(new FieldOrder(fieldFilter.fieldReference, Direction.ASCENDING));
break;
}
}
} else {
for (FieldOrder fieldOrder : options.getFieldOrders()) {
if (FieldPath.isDocumentId(fieldOrder.fieldReference.getFieldPath())) {
hasDocumentId = true;
}
}

boolean hasDocumentId = false;
for (FieldOrder fieldOrder : implicitOrders) {
if (FieldPath.isDocumentId(fieldOrder.fieldReference.getFieldPath())) {
hasDocumentId = true;
}
}

Expand Down Expand Up @@ -1237,7 +1252,12 @@ BundledQuery toBundledQuery() {
private StructuredQuery.Builder buildWithoutClientTranslation() {
StructuredQuery.Builder structuredQuery = StructuredQuery.newBuilder();
CollectionSelector.Builder collectionSelector = CollectionSelector.newBuilder();
collectionSelector.setCollectionId(options.getCollectionId());

// Kindless queries select all descendant documents, so we don't add the collectionId field.
if (!options.isKindless()) {
collectionSelector.setCollectionId(options.getCollectionId());
}

collectionSelector.setAllDescendants(options.getAllDescendants());
structuredQuery.addFrom(collectionSelector);

Expand Down Expand Up @@ -1525,10 +1545,17 @@ public void onError(Throwable throwable) {
// since we are requiring at least a single document result.
QueryDocumentSnapshot cursor = lastReceivedDocument.get();
if (cursor != null) {
Query.this
.startAfter(cursor)
.internalStream(
documentObserver, /* transactionId= */ null, cursor.getReadTime());
if (options.getRequireConsistency()) {
Query.this
.startAfter(cursor)
.internalStream(
documentObserver, /* transactionId= */ null, cursor.getReadTime());
} else {
Query.this
.startAfter(cursor)
.internalStream(
documentObserver, /* transactionId= */ null, /* readTime= */ null);
}
}
} else {
Tracing.getTracer().getCurrentSpan().addAnnotation("Firestore.Query: Error");
Expand Down

0 comments on commit 9ff2f41

Please sign in to comment.