Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add recursiveDelete() to Firestore #622

Merged
merged 10 commits into from May 24, 2021
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
6 changes: 6 additions & 0 deletions google-cloud-firestore/clirr-ignored-differences.xml
Expand Up @@ -252,4 +252,10 @@
<to>*</to>
</difference>

<!-- Recursive Delete -->
thebrianchen marked this conversation as resolved.
Show resolved Hide resolved
<difference>
<differenceType>7012</differenceType>
<className>com/google/cloud/firestore/Firestore</className>
<method>com.google.api.core.ApiFuture recursiveDelete(*)</method>
</difference>
</differences>
Expand Up @@ -805,7 +805,17 @@ public void close() throws InterruptedException, ExecutionException {
flushFuture.get();
}

private void verifyNotClosedLocked() {
/**
* Used for verifying that the BulkWriter instance isn't closed when calling from outside this
* class.
*/
void verifyNotClosed() {
synchronized (lock) {
verifyNotClosedLocked();
}
}

void verifyNotClosedLocked() {
if (this.closed) {
throw new IllegalStateException("BulkWriter has already been closed.");
}
Expand Down
Expand Up @@ -193,6 +193,84 @@ void getAll(
@Nonnull
BulkWriter bulkWriter(BulkWriterOptions options);

/**
* Recursively deletes all documents and subcollections at and under the specified level.
*
* <p>If any delete fails, the ApiFuture contains an error with an error message containing the
* number of failed deletes and the stack trace of the last failed delete. The provided reference
* is deleted regardless of whether all deletes succeeded.
*
* <p>recursiveDelete() uses a {@link BulkWriter} instance with default settings to perform the
* deletes. To customize throttling rates or add success/error callbacks, pass in a custom
* BulkWriter instance.
*
* @param reference The reference of the collection to delete.
* @return An ApiFuture that completes when all deletes have been performed. The future fails with
* an error if any of the deletes fail.
*/
@BetaApi
@Nonnull
ApiFuture<Void> recursiveDelete(CollectionReference reference);

/**
* Recursively deletes all documents and subcollections at and under the specified level.
*
* <p>If any delete fails, the ApiFuture contains an error with an error message containing the
* number of failed deletes and the stack trace of the last failed delete. The provided reference
* is deleted regardless of whether all deletes succeeded.
*
* <p>recursiveDelete() uses a {@link BulkWriter} instance with default settings to perform the
* deletes. To customize throttling rates or add success/error callbacks, pass in a custom
* BulkWriter instance.
*
* @param reference The reference of the collection to delete.
* @param bulkWriter A custom BulkWriter instance used to perform the deletes.
* @return An ApiFuture that completes when all deletes have been performed. The future fails with
* an error if any of the deletes fail.
*/
@BetaApi
@Nonnull
ApiFuture<Void> recursiveDelete(CollectionReference reference, BulkWriter bulkWriter);

/**
* Recursively deletes all documents and subcollections at and under the specified level.
*
* <p>If any delete fails, the ApiFuture contains an error with an error message containing the
* number of failed deletes and the stack trace of the last failed delete. The provided reference
* is deleted regardless of whether all deletes succeeded.
*
* <p>recursiveDelete() uses a {@link BulkWriter} instance with default settings to perform the
* deletes. To customize throttling rates or add success/error callbacks, pass in a custom
* BulkWriter instance.
*
* @param reference The reference of the document to delete.
* @return An ApiFuture that completes when all deletes have been performed. The future fails with
* an error if any of the deletes fail.
*/
@BetaApi
@Nonnull
ApiFuture<Void> recursiveDelete(DocumentReference reference);

/**
* Recursively deletes all documents and subcollections at and under the specified level.
*
* <p>If any delete fails, the ApiFuture contains an error with an error message containing the
* number of failed deletes and the stack trace of the last failed delete. The provided reference
* is deleted regardless of whether all deletes succeeded.
*
* <p>recursiveDelete() uses a {@link BulkWriter} instance with default settings to perform the
* deletes. To customize throttling rates or add success/error callbacks, pass in a custom
* BulkWriter instance.
*
* @param reference The reference of the document to delete.
* @param bulkWriter A custom BulkWriter instance used to perform the deletes.
* @return An ApiFuture that completes when all deletes have been performed. The future fails with
* an error if any of the deletes fail.
*/
@BetaApi
@Nonnull
ApiFuture<Void> recursiveDelete(DocumentReference reference, BulkWriter bulkWriter);

/**
* Returns a FirestoreBundle.Builder {@link FirestoreBundle.Builder} instance using an
* automatically generated bundle ID. When loaded on clients, client SDKs use the bundle ID and
Expand Down
Expand Up @@ -24,6 +24,7 @@
import com.google.api.gax.rpc.UnaryCallable;
import com.google.cloud.Timestamp;
import com.google.cloud.firestore.spi.v1.FirestoreRpc;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.firestore.v1.BatchGetDocumentsRequest;
Expand Down Expand Up @@ -59,6 +60,12 @@ class FirestoreImpl implements Firestore, FirestoreRpcContext<FirestoreImpl> {
private final FirestoreOptions firestoreOptions;
private final ResourcePath databasePath;

/**
* A lazy-loaded BulkWriter instance to be used with recursiveDelete() if no BulkWriter instance
* is provided.
*/
@Nullable private BulkWriter bulkWriterInstance;

private boolean closed;

FirestoreImpl(FirestoreOptions options) {
Expand All @@ -76,6 +83,14 @@ class FirestoreImpl implements Firestore, FirestoreRpcContext<FirestoreImpl> {
ResourcePath.create(DatabaseRootName.of(options.getProjectId(), options.getDatabaseId()));
}

/** Lazy-load the Firestore's default BulkWriter. */
private BulkWriter getBulkWriter() {
if (bulkWriterInstance == null) {
bulkWriterInstance = bulkWriter();
}
return bulkWriterInstance;
}

/** Creates a pseudo-random 20-character ID that can be used for Firestore documents. */
static String autoId() {
StringBuilder builder = new StringBuilder();
Expand All @@ -102,6 +117,44 @@ public BulkWriter bulkWriter(BulkWriterOptions options) {
return new BulkWriter(this, options);
}

@Nonnull
public ApiFuture<Void> recursiveDelete(CollectionReference reference) {
BulkWriter writer = getBulkWriter();
RecursiveDelete deleter = new RecursiveDelete(this, writer, reference);
return deleter.run();
}

@Nonnull
public ApiFuture<Void> recursiveDelete(CollectionReference reference, BulkWriter bulkWriter) {
RecursiveDelete deleter = new RecursiveDelete(this, bulkWriter, reference);
return deleter.run();
}

@Nonnull
public ApiFuture<Void> recursiveDelete(DocumentReference reference) {
BulkWriter writer = getBulkWriter();
RecursiveDelete deleter = new RecursiveDelete(this, writer, reference);
return deleter.run();
}

@Nonnull
public ApiFuture<Void> recursiveDelete(DocumentReference reference, BulkWriter bulkWriter) {
RecursiveDelete deleter = new RecursiveDelete(this, bulkWriter, reference);
return deleter.run();
}

/**
* This overload is only used to test the query resumption with startAfter() once the
* RecursiveDelete instance has MAX_PENDING_OPS pending.
*/
@Nonnull
@VisibleForTesting
ApiFuture<Void> recursiveDelete(
CollectionReference reference, BulkWriter bulkWriter, int maxLimit, int minLimit) {
RecursiveDelete deleter = new RecursiveDelete(this, bulkWriter, reference, maxLimit, minLimit);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This code flow would be a bit cleaner if you called this method from all overloads. This will ensure that all calls go through the same callsite. If you take parentPath and collectionId as input here, then you will also remove some duplication in RecursiveDelete (as you would only need one constructor).

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

done. RecursiveDelete only has one constructor now.

return deleter.run();
}

@Nonnull
@Override
public CollectionReference collection(@Nonnull String collectionPath) {
Expand Down
Expand Up @@ -255,13 +255,24 @@ abstract static class QueryOptions {

abstract ImmutableList<FieldReference> getFieldProjections();

// Whether to select all documents under `parentPath`. By default, only
// collections that match `collectionId` are selected.
abstract boolean isKindless();

// Whether to require consistent documents when restarting the query. By
// default, restarting the query uses the readTime offset of the original
// query to provide consistent results.
abstract boolean getRequireConsistency();
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This should technically also not use the `get` prefix, but the `is` prefix doesn't work here, so you may want to keep it. I also wonder whether it should be "requires".

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

doesRequireConsistency sounds weird. I'll keep it as is to match the node implementation which is requireConsistency.


static Builder builder() {
return new AutoValue_Query_QueryOptions.Builder()
.setAllDescendants(false)
.setLimitType(LimitType.First)
.setFieldOrders(ImmutableList.<FieldOrder>of())
.setFieldFilters(ImmutableList.<FieldFilter>of())
.setFieldProjections(ImmutableList.<FieldReference>of());
.setFieldProjections(ImmutableList.<FieldReference>of())
.setKindless(false)
.setRequireConsistency(true);
}

abstract Builder toBuilder();
Expand Down Expand Up @@ -290,6 +301,10 @@ abstract static class Builder {

abstract Builder setFieldProjections(ImmutableList<FieldReference> value);

abstract Builder setKindless(boolean value);

abstract Builder setRequireConsistency(boolean value);

abstract QueryOptions build();
}
}
Expand Down Expand Up @@ -327,21 +342,21 @@ private static boolean isUnaryComparison(@Nullable Object value) {
/** Computes the backend ordering semantics for DocumentSnapshot cursors. */
private ImmutableList<FieldOrder> createImplicitOrderBy() {
List<FieldOrder> implicitOrders = new ArrayList<>(options.getFieldOrders());
boolean hasDocumentId = false;

// If no explicit ordering is specified, use the first inequality to define an implicit order.
if (implicitOrders.isEmpty()) {
// If no explicit ordering is specified, use the first inequality to define an implicit order.
for (FieldFilter fieldFilter : options.getFieldFilters()) {
if (fieldFilter.isInequalityFilter()) {
implicitOrders.add(new FieldOrder(fieldFilter.fieldReference, Direction.ASCENDING));
break;
}
}
} else {
for (FieldOrder fieldOrder : options.getFieldOrders()) {
if (FieldPath.isDocumentId(fieldOrder.fieldReference.getFieldPath())) {
hasDocumentId = true;
}
}

boolean hasDocumentId = false;
for (FieldOrder fieldOrder : implicitOrders) {
if (FieldPath.isDocumentId(fieldOrder.fieldReference.getFieldPath())) {
hasDocumentId = true;
}
}

Expand Down Expand Up @@ -1237,7 +1252,12 @@ BundledQuery toBundledQuery() {
private StructuredQuery.Builder buildWithoutClientTranslation() {
StructuredQuery.Builder structuredQuery = StructuredQuery.newBuilder();
CollectionSelector.Builder collectionSelector = CollectionSelector.newBuilder();
collectionSelector.setCollectionId(options.getCollectionId());

// Kindless queries select all descendant documents, so we don't add the collectionId field.
if (!options.isKindless()) {
collectionSelector.setCollectionId(options.getCollectionId());
}

collectionSelector.setAllDescendants(options.getAllDescendants());
structuredQuery.addFrom(collectionSelector);

Expand Down Expand Up @@ -1525,10 +1545,17 @@ public void onError(Throwable throwable) {
// since we are requiring at least a single document result.
QueryDocumentSnapshot cursor = lastReceivedDocument.get();
if (cursor != null) {
Query.this
.startAfter(cursor)
.internalStream(
documentObserver, /* transactionId= */ null, cursor.getReadTime());
if (options.getRequireConsistency()) {
Query.this
.startAfter(cursor)
.internalStream(
documentObserver, /* transactionId= */ null, cursor.getReadTime());
} else {
Query.this
.startAfter(cursor)
.internalStream(
documentObserver, /* transactionId= */ null, /* readTime= */ null);
}
}
} else {
Tracing.getTracer().getCurrentSpan().addAnnotation("Firestore.Query: Error");
Expand Down