Skip to content

Commit

Permalink
fix(1.113.14-sp): backport critical bug fixes (#993)
Browse files Browse the repository at this point in the history
* feat: Remove client side validation for lifecycle conditions (#816)

* Remove client side validation for lifecycle conditions

* fix lint and suggest updating

(cherry picked from commit 5ec84cc)

* fix: update BucketInfo translation code to properly handle lifecycle rules (#852)

Fixes #850

(cherry picked from commit 3b1df1d)

* fix: improve error detection and reporting for BlobWriteChannel retry state (#846)

Add new checks to ensure a more informative error than NullPointerException is thrown if the StorageObject or its size is unable to be resolved on the last chunk.

Fixes #839

(cherry picked from commit d0f2184)

* fix: correct lastChunk retry logic in BlobWriteChannel (#918)

Add new method StorageRpc#queryCompletedResumableUpload which allows getting a shallow StorageObject for a resumable upload session which is complete.

Update BlobWriteChannel to use StorageRpc#queryCompletedResumableUpload instead of StorageRpc#get when attempting to validate the upload size of an object when it determines the upload is complete and is on the last chunk.

If a BlobWriteChannel is opened with a conditional like IfGenerationMatch it is not possible to simply get the object, as the object can drift generationally while the resumable upload is being performed.

Related to #839

(cherry picked from commit ab0228c)

* test: remove error string matching (#861)

It looks like the text for this error on the backend has changed
(sometimes) from "Precondition Failed" to "At least one of the
pre-conditions you specified did not hold". I don't think it's
really necessary to check the exact message in any case given
that we do check for a code of 412, which implies a precondition
failure. I added a check of the error Reason instead,  which is more
standardized.

Fixes #853

(cherry picked from commit 146a3d3)

Co-authored-by: JesseLovelace <43148100+JesseLovelace@users.noreply.github.com>
Co-authored-by: Chris Cotter <cjcotter@google.com>
  • Loading branch information
3 people committed Sep 28, 2021
1 parent 8614975 commit 447d35d
Show file tree
Hide file tree
Showing 9 changed files with 408 additions and 61 deletions.
5 changes: 5 additions & 0 deletions google-cloud-storage/clirr-ignored-differences.xml
Expand Up @@ -31,4 +31,9 @@
<method>long getCurrentUploadOffset(java.lang.String)</method>
<differenceType>7012</differenceType>
</difference>
<difference>
<className>com/google/cloud/storage/spi/v1/StorageRpc</className>
<method>com.google.api.services.storage.model.StorageObject queryCompletedResumableUpload(java.lang.String, long)</method>
<differenceType>7012</differenceType>
</difference>
</differences>
Expand Up @@ -25,7 +25,7 @@
import com.google.cloud.RetryHelper;
import com.google.cloud.WriteChannel;
import com.google.cloud.storage.spi.v1.StorageRpc;
import com.google.common.collect.Maps;
import java.math.BigInteger;
import java.net.URL;
import java.util.Map;
import java.util.concurrent.Callable;
Expand Down Expand Up @@ -77,20 +77,52 @@ private long getRemotePosition() {
return getOptions().getStorageRpcV1().getCurrentUploadOffset(getUploadId());
}

private StorageObject getRemoteStorageObject() {
return getOptions()
.getStorageRpcV1()
.get(getEntity().toPb(), Maps.newEnumMap(StorageRpc.Option.class));
private static StorageException unrecoverableState(
String uploadId,
int chunkOffset,
int chunkLength,
long localPosition,
long remotePosition,
boolean last) {
return unrecoverableState(
uploadId,
chunkOffset,
chunkLength,
localPosition,
remotePosition,
last,
"Unable to recover in upload.\nThis may be a symptom of multiple clients uploading to the same upload session.");
}

private static StorageException errorResolvingMetadataLastChunk(
String uploadId,
int chunkOffset,
int chunkLength,
long localPosition,
long remotePosition,
boolean last) {
return unrecoverableState(
uploadId,
chunkOffset,
chunkLength,
localPosition,
remotePosition,
last,
"Unable to load object metadata to determine if last chunk was successfully written");
}

private StorageException unrecoverableState(
int chunkOffset, int chunkLength, long localPosition, long remotePosition, boolean last) {
private static StorageException unrecoverableState(
String uploadId,
int chunkOffset,
int chunkLength,
long localPosition,
long remotePosition,
boolean last,
String message) {
StringBuilder sb = new StringBuilder();
sb.append("Unable to recover in upload.\n");
sb.append(
"This may be a symptom of multiple clients uploading to the same upload session.\n\n");
sb.append(message).append("\n\n");
sb.append("For debugging purposes:\n");
sb.append("uploadId: ").append(getUploadId()).append('\n');
sb.append("uploadId: ").append(uploadId).append('\n');
sb.append("chunkOffset: ").append(chunkOffset).append('\n');
sb.append("chunkLength: ").append(chunkLength).append('\n');
sb.append("localOffset: ").append(localPosition).append('\n');
Expand Down Expand Up @@ -162,7 +194,7 @@ public void run() {
// Get remote offset from API
final long localPosition = getPosition();
// For each request it should be possible to retry from its location in this code
final long remotePosition = isRetrying() ? getRemotePosition() : getPosition();
final long remotePosition = isRetrying() ? getRemotePosition() : localPosition;
final int chunkOffset = (int) (remotePosition - localPosition);
final int chunkLength = length - chunkOffset;
final boolean uploadAlreadyComplete = remotePosition == -1;
Expand All @@ -173,13 +205,45 @@ public void run() {
if (uploadAlreadyComplete && lastChunk) {
// Case 6
// Request object metadata if not available
long totalBytes = getPosition() + length;
if (storageObject == null) {
storageObject = getRemoteStorageObject();
storageObject =
getOptions()
.getStorageRpcV1()
.queryCompletedResumableUpload(getUploadId(), totalBytes);
}
// The following checks are defined here explicitly to provide a more
// informative error if either the storageObject is unable to be resolved or its
// size is unable to be determined. This scenario is a very rare case of failure
// that can arise when packets are lost.
if (storageObject == null) {
throw errorResolvingMetadataLastChunk(
getUploadId(),
chunkOffset,
chunkLength,
localPosition,
remotePosition,
lastChunk);
}
// Verify that with the final chunk we match the blob length
if (storageObject.getSize().longValue() != getPosition() + length) {
BigInteger size = storageObject.getSize();
if (size == null) {
throw errorResolvingMetadataLastChunk(
getUploadId(),
chunkOffset,
chunkLength,
localPosition,
remotePosition,
lastChunk);
}
if (size.longValue() != totalBytes) {
throw unrecoverableState(
chunkOffset, chunkLength, localPosition, remotePosition, lastChunk);
getUploadId(),
chunkOffset,
chunkLength,
localPosition,
remotePosition,
lastChunk);
}
retrying = false;
} else if (uploadAlreadyComplete && !lastChunk && !checkingForLastChunk) {
Expand All @@ -201,7 +265,12 @@ public void run() {
} else {
// Case 4 && Case 8 && Case 9
throw unrecoverableState(
chunkOffset, chunkLength, localPosition, remotePosition, lastChunk);
getUploadId(),
chunkOffset,
chunkLength,
localPosition,
remotePosition,
lastChunk);
}
}
}),
Expand Down
Expand Up @@ -43,11 +43,13 @@
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.logging.Logger;

/**
* Google Storage bucket metadata;
Expand Down Expand Up @@ -101,6 +103,8 @@ public com.google.api.services.storage.model.Bucket apply(BucketInfo bucketInfo)
private final String locationType;
private final Logging logging;

private static final Logger log = Logger.getLogger(BucketInfo.class.getName());

/**
* The Bucket's IAM Configuration.
*
Expand Down Expand Up @@ -356,9 +360,11 @@ public LifecycleRule(LifecycleAction action, LifecycleCondition condition) {
&& condition.getNoncurrentTimeBefore() == null
&& condition.getCustomTimeBefore() == null
&& condition.getDaysSinceCustomTime() == null) {
throw new IllegalArgumentException(
"You must specify at least one condition to use object lifecycle "
+ "management. Please see https://cloud.google.com/storage/docs/lifecycle for details.");
log.warning(
"Creating a lifecycle condition with no supported conditions:\n"
+ this
+ "\nAttempting to update with this rule may cause errors. Please update "
+ " to the latest version of google-cloud-storage");
}

this.lifecycleAction = action;
Expand Down Expand Up @@ -1833,33 +1839,52 @@ public ObjectAccessControl apply(Acl acl) {
website.setNotFoundPage(notFoundPage);
bucketPb.setWebsite(website);
}
Set<Rule> rules = new HashSet<>();
if (deleteRules != null) {
rules.addAll(
transform(
deleteRules,
new Function<DeleteRule, Rule>() {
@Override
public Rule apply(DeleteRule deleteRule) {
return deleteRule.toPb();
}
}));
}
if (lifecycleRules != null) {
rules.addAll(
transform(
lifecycleRules,
new Function<LifecycleRule, Rule>() {
@Override
public Rule apply(LifecycleRule lifecycleRule) {
return lifecycleRule.toPb();
}
}));
}

if (rules != null) {
if (deleteRules != null || lifecycleRules != null) {
Lifecycle lifecycle = new Lifecycle();
lifecycle.setRule(ImmutableList.copyOf(rules));

// Here we determine if we need to "clear" any defined Lifecycle rules by explicitly setting
// the Rule list of lifecycle to the empty list.
// In order for us to clear the rules, one of the three following must be true:
// 1. deleteRules is null while lifecycleRules is non-null and empty
// 2. lifecycleRules is null while deleteRules is non-null and empty
// 3. lifecycleRules is non-null and empty while deleteRules is non-null and empty
// If none of the above three is true, we interpret this as the Lifecycle rules being
// updated to the defined set of DeleteRule and LifecycleRule.
if ((deleteRules == null && lifecycleRules.isEmpty())
|| (lifecycleRules == null && deleteRules.isEmpty())
|| (deleteRules != null && deleteRules.isEmpty() && lifecycleRules.isEmpty())) {
lifecycle.setRule(Collections.<Rule>emptyList());
} else {
Set<Rule> rules = new HashSet<>();
if (deleteRules != null) {
rules.addAll(
transform(
deleteRules,
new Function<DeleteRule, Rule>() {
@Override
public Rule apply(DeleteRule deleteRule) {
return deleteRule.toPb();
}
}));
}
if (lifecycleRules != null) {
rules.addAll(
transform(
lifecycleRules,
new Function<LifecycleRule, Rule>() {
@Override
public Rule apply(LifecycleRule lifecycleRule) {
return lifecycleRule.toPb();
}
}));
}

if (!rules.isEmpty()) {
lifecycle.setRule(ImmutableList.copyOf(rules));
}
}

bucketPb.setLifecycle(lifecycle);
}

Expand Down
Expand Up @@ -799,6 +799,25 @@ public long getCurrentUploadOffset(String uploadId) {
}
}

@Override
public StorageObject queryCompletedResumableUpload(String uploadId, long totalBytes) {
try {
GenericUrl url = new GenericUrl(uploadId);
HttpRequest req = storage.getRequestFactory().buildPutRequest(url, new EmptyContent());
req.getHeaders().setContentRange(String.format("bytes */%s", totalBytes));
req.setParser(storage.getObjectParser());
HttpResponse response = req.execute();
// If the response is 200
if (response.getStatusCode() == 200) {
return response.parseAs(StorageObject.class);
} else {
throw buildStorageException(response.getStatusCode(), response.getStatusMessage());
}
} catch (IOException ex) {
throw translate(ex);
}
}

@Override
public StorageObject writeWithResponse(
String uploadId,
Expand Down Expand Up @@ -864,10 +883,7 @@ public StorageObject writeWithResponse(
if (exception != null) {
throw exception;
}
GoogleJsonError error = new GoogleJsonError();
error.setCode(code);
error.setMessage(message);
throw translate(error);
throw buildStorageException(code, message);
}
} catch (IOException ex) {
span.setStatus(Status.UNKNOWN.withDescription(ex.getMessage()));
Expand Down Expand Up @@ -914,10 +930,7 @@ public String open(StorageObject object, Map<Option, ?> options) {
setEncryptionHeaders(requestHeaders, "x-goog-encryption-", options);
HttpResponse response = httpRequest.execute();
if (response.getStatusCode() != 200) {
GoogleJsonError error = new GoogleJsonError();
error.setCode(response.getStatusCode());
error.setMessage(response.getStatusMessage());
throw translate(error);
throw buildStorageException(response.getStatusCode(), response.getStatusMessage());
}
return response.getHeaders().getLocation();
} catch (IOException ex) {
Expand Down Expand Up @@ -947,10 +960,7 @@ public String open(String signedURL) {
requestHeaders.set("x-goog-resumable", "start");
HttpResponse response = httpRequest.execute();
if (response.getStatusCode() != 201) {
GoogleJsonError error = new GoogleJsonError();
error.setCode(response.getStatusCode());
error.setMessage(response.getStatusMessage());
throw translate(error);
throw buildStorageException(response.getStatusCode(), response.getStatusMessage());
}
return response.getHeaders().getLocation();
} catch (IOException ex) {
Expand Down Expand Up @@ -1610,4 +1620,11 @@ public ServiceAccount getServiceAccount(String projectId) {
span.end(HttpStorageRpcSpans.END_SPAN_OPTIONS);
}
}

private static StorageException buildStorageException(int statusCode, String statusMessage) {
GoogleJsonError error = new GoogleJsonError();
error.setCode(statusCode);
error.setMessage(statusMessage);
return translate(error);
}
}
Expand Up @@ -337,6 +337,24 @@ void write(
*/
long getCurrentUploadOffset(String uploadId);

/**
* Attempts to retrieve the StorageObject from a completed resumable upload. When a resumable
* upload completes, the response will be the up-to-date StorageObject metadata. This up-to-date
* metadata can then be used to validate the total size of the object along with new generation
* and other information.
*
* <p>If for any reason, the response to the final PUT to a resumable upload is not received, this
* method can be used to query for the up-to-date StorageObject. If the upload is complete, this
* method can be used to access the StorageObject independently from any other liveness or
* conditional criteria requirements that are otherwise applicable when using {@link
* #get(StorageObject, Map)}.
*
* @param uploadId resumable upload ID URL
* @param totalBytes the total number of bytes that should have been written.
* @throws StorageException if the upload is incomplete or does not exist
*/
StorageObject queryCompletedResumableUpload(String uploadId, long totalBytes);

/**
* Writes the provided bytes to a storage object at the provided location. If {@code last=true}
* returns metadata of the updated object, otherwise returns null.
Expand Down
Expand Up @@ -144,6 +144,11 @@ public long getCurrentUploadOffset(String uploadId) {
throw new UnsupportedOperationException("Not implemented yet");
}

@Override
public StorageObject queryCompletedResumableUpload(String uploadId, long totalBytes) {
throw new UnsupportedOperationException("Not implemented yet");
}

@Override
public StorageObject writeWithResponse(
String uploadId,
Expand Down

0 comments on commit 447d35d

Please sign in to comment.