
Commit

Merge pull request #173 from scireum/develop
Release 7.0.2
jakobvogel committed Jun 7, 2021
2 parents c5cc203 + 5347e00 commit df191b4
Showing 3 changed files with 100 additions and 71 deletions.
4 changes: 2 additions & 2 deletions src/main/java/ninja/AwsHashCalculator.java
@@ -19,8 +19,8 @@
/**
* Class in charge of generating the appropriate hash for the given request and path prefix by
* delegating the computation to either {@link Aws4HashCalculator} or {@link
- * AwsLegacyHashCalculator} depending of whether or not Aws4HashCalculator supports the request
- * or not
+ * AwsLegacyHashCalculator} depending of whether or not {@code Aws4HashCalculator} supports the
+ * request or not.
*/
@Register(classes = AwsHashCalculator.class)
public class AwsHashCalculator {
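
For readers skimming the diff, the delegation that the Javadoc above describes amounts to a simple dispatch between the two calculators. The following Java sketch is only a reading aid under assumed names (supports and computeHash are not taken from the s3ninja sources):

    // Illustrative sketch of the dispatch described in the Javadoc above.
    // The interface and method names here are assumptions, not the real s3ninja API.
    interface HashCalculator {
        boolean supports(String request);
        String computeHash(String request, String pathPrefix);
    }

    class AwsHashCalculatorSketch {
        private final HashCalculator aws4;
        private final HashCalculator legacy;

        AwsHashCalculatorSketch(HashCalculator aws4, HashCalculator legacy) {
            this.aws4 = aws4;
            this.legacy = legacy;
        }

        String computeHash(String request, String pathPrefix) {
            // Prefer the AWS4 calculator whenever it can handle the request,
            // otherwise fall back to the legacy calculator.
            return aws4.supports(request)
                    ? aws4.computeHash(request, pathPrefix)
                    : legacy.computeHash(request, pathPrefix);
        }
    }

Treat this as a paraphrase of the comment, not of the actual implementation.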
4 changes: 2 additions & 2 deletions src/main/java/ninja/S3Dispatcher.java
@@ -270,7 +270,7 @@ public static String getEffectiveURI(WebContext webContext) {
uri = uri.substring(1);
}

- return uri;
+ return Strings.urlEncode(uri).replace("+", "%20").replace("%2F", "/");
}

/**
@@ -994,7 +994,7 @@ private File getUploadDir(String uploadId) {
}

private File combineParts(String id, String uploadId, List<File> parts) {
- File file = new File(getUploadDir(uploadId), id);
+ File file = new File(getUploadDir(uploadId), StoredObject.encodeKey(id));

try {
if (!file.createNewFile()) {
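
To make the effect of the new getEffectiveURI return statement concrete: the effective URI is percent-encoded, spaces end up as %20 rather than '+', and '/' stays a visible path separator, so keys with slashes keep their structure. The snippet below is a rough stand-in that assumes sirius' Strings.urlEncode behaves like java.net.URLEncoder with UTF-8; the sample key is modelled on the DEFAULT_KEY introduced in the tests below.

    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    public class EffectiveUriEncodingDemo {
        public static void main(String[] args) {
            // Hypothetical effective URI: bucket "test" plus a key containing slashes and spaces.
            String uri = "test/key/with/slashes and spaces";
            String encoded = URLEncoder.encode(uri, StandardCharsets.UTF_8)
                    .replace("+", "%20")   // spaces become %20 instead of '+'
                    .replace("%2F", "/");  // keep slashes readable as path separators
            System.out.println(encoded);
            // prints: test/key/with/slashes%20and%20spaces
        }
    }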
163 changes: 96 additions & 67 deletions src/test/java/BaseAWSSpec.groovy
@@ -23,66 +23,77 @@ import java.time.temporal.ChronoUnit

abstract class BaseAWSSpec extends BaseSpecification {

+ def DEFAULT_BUCKET_NAME = "test"
+
+ def DEFAULT_KEY = "key/with/slashes and spaces 😇"
+
abstract AmazonS3Client getClient()

def "HEAD of non-existing bucket as expected"() {
given:
+ def bucketName = "does-not-exist"
def client = getClient()
when:
- if (client.doesBucketExist("does-not-exist")) {
- client.deleteBucket("does-not-exist")
+ if (client.doesBucketExist(bucketName)) {
+ client.deleteBucket(bucketName)
}
then:
- !client.doesBucketExist("does-not-exist")
- !client.doesBucketExistV2("does-not-exist")
+ !client.doesBucketExist(bucketName)
+ !client.doesBucketExistV2(bucketName)
}

def "PUT and then HEAD bucket as expected"() {
given:
+ def bucketName = DEFAULT_BUCKET_NAME
def client = getClient()
when:
- if (client.doesBucketExist("test")) {
- client.deleteBucket("test")
+ if (client.doesBucketExist(bucketName)) {
+ client.deleteBucket(bucketName)
}
- client.createBucket("test")
+ client.createBucket(bucketName)
then:
- client.doesBucketExist("test")
- client.doesBucketExistV2("test")
+ client.doesBucketExist(bucketName)
+ client.doesBucketExistV2(bucketName)
}

def "DELETE of non-existing bucket as expected"() {
given:
+ def bucketName = "does-not-exist"
def client = getClient()
when:
- if (client.doesBucketExist("does-not-exist")) {
- client.deleteBucket("does-not-exist")
+ if (client.doesBucketExist(bucketName)) {
+ client.deleteBucket(bucketName)
}
and:
- client.deleteBucket("does-not-exist")
+ client.deleteBucket(bucketName)
then:
AmazonS3Exception e = thrown()
e.statusCode == 404
- !client.doesBucketExist("does-not-exist")
- !client.doesBucketExistV2("does-not-exist")
+ !client.doesBucketExist(bucketName)
+ !client.doesBucketExistV2(bucketName)
}

def "PUT and then DELETE bucket as expected"() {
given:
+ def bucketName = DEFAULT_BUCKET_NAME
def client = getClient()
when:
- if (!client.doesBucketExist("test")) {
- client.createBucket("test")
+ if (!client.doesBucketExist(bucketName)) {
+ client.createBucket(bucketName)
}
- client.deleteBucket("test")
+ client.deleteBucket(bucketName)
then:
- !client.doesBucketExist("test")
+ !client.doesBucketExist(bucketName)
}

def "PUT and then GET file work using TransferManager"() {
- when:
+ given:
+ def bucketName = DEFAULT_BUCKET_NAME
+ def key = DEFAULT_KEY
def client = getClient()
- if (!client.doesBucketExist("test")) {
- client.createBucket("test")
+ when:
+ if (!client.doesBucketExist(bucketName)) {
+ client.createBucket(bucketName)
}
and:
File file = File.createTempFile("test", "")
@@ -92,33 +103,35 @@ abstract class BaseAWSSpec extends BaseSpecification {
}
and:
def tm = TransferManagerBuilder.standard().withS3Client(client).build()
- tm.upload("test", "test", file).waitForUploadResult()
+ tm.upload(bucketName, key, file).waitForUploadResult()
and:
File download = File.createTempFile("s3-test", "")
download.deleteOnExit()
- tm.download("test", "test", download).waitForCompletion()
+ tm.download(bucketName, key, download).waitForCompletion()
then:
Files.toString(file, Charsets.UTF_8) == Files.toString(download, Charsets.UTF_8)
}

def "PUT and then GET work as expected"() {
given:
+ def bucketName = DEFAULT_BUCKET_NAME
+ def key = DEFAULT_KEY
def client = getClient()
when:
- if (!client.doesBucketExist("test")) {
- client.createBucket("test")
+ if (!client.doesBucketExist(bucketName)) {
+ client.createBucket(bucketName)
}
and:
client.putObject(
- "test",
- "test",
+ bucketName,
+ key,
new ByteArrayInputStream("Test".getBytes(Charsets.UTF_8)),
new ObjectMetadata())
def content = new String(
- ByteStreams.toByteArray(client.getObject("test", "test").getObjectContent()),
+ ByteStreams.toByteArray(client.getObject(bucketName, key).getObjectContent()),
Charsets.UTF_8)
and:
- GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest("test", "test")
+ GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucketName, key)
URLConnection c = new URL(getClient().generatePresignedUrl(request).toString()).openConnection()
and:
String downloadedData = new String(ByteStreams.toByteArray(c.getInputStream()), Charsets.UTF_8)
@@ -130,112 +143,126 @@ abstract class BaseAWSSpec extends BaseSpecification {

def "PUT and then LIST work as expected"() {
given:
+ def bucketName = DEFAULT_BUCKET_NAME
+ def key1 = DEFAULT_KEY + "/Eins"
+ def key2 = DEFAULT_KEY + "/Zwei"
def client = getClient()
when:
- if (client.doesBucketExist("test")) {
- client.deleteBucket("test")
+ if (client.doesBucketExist(bucketName)) {
+ client.deleteBucket(bucketName)
}
- client.createBucket("test")
+ client.createBucket(bucketName)
and:
client.putObject(
- "test",
- "Eins",
+ bucketName,
+ key1,
new ByteArrayInputStream("Eins".getBytes(Charsets.UTF_8)),
new ObjectMetadata())
client.putObject(
- "test",
- "Zwei",
+ bucketName,
+ key2,
new ByteArrayInputStream("Zwei".getBytes(Charsets.UTF_8)),
new ObjectMetadata())
then:
- def listing = client.listObjects("test")
+ def listing = client.listObjects(bucketName)
def summaries = listing.getObjectSummaries()
summaries.size() == 2
- summaries.get(0).getKey() == "Eins"
- summaries.get(1).getKey() == "Zwei"
+ summaries.get(0).getKey() == key1
+ summaries.get(1).getKey() == key2
}

def "PUT and then DELETE work as expected"() {
given:
+ def bucketName = DEFAULT_BUCKET_NAME
+ def key = DEFAULT_KEY
def client = getClient()
when:
- if (!client.doesBucketExist("test")) {
- client.createBucket("test")
+ if (!client.doesBucketExist(bucketName)) {
+ client.createBucket(bucketName)
}
and:
client.putObject(
- "test",
- "test",
+ bucketName,
+ key,
new ByteArrayInputStream("Test".getBytes(Charsets.UTF_8)),
new ObjectMetadata())
- client.deleteBucket("test")
- client.getObject("test", "test")
+ client.deleteBucket(bucketName)
+ client.getObject(bucketName, key)
then:
AmazonS3Exception e = thrown()
e.statusCode == 404
}

def "MultipartUpload and then GET work as expected"() {
given:
+ def bucketName = DEFAULT_BUCKET_NAME
+ def key = DEFAULT_KEY
def client = getClient()
when:
def transfer = TransferManagerBuilder.standard().
- withS3Client(getClient()).
+ withS3Client(client).
withMultipartUploadThreshold(1).
withMinimumUploadPartSize(1).build()
def meta = new ObjectMetadata()
def message = "Test".getBytes(Charsets.UTF_8)
and:
- if (!getClient().doesBucketExist("test")) {
- getClient().createBucket("test")
+ if (!client.doesBucketExist(bucketName)) {
+ client.createBucket(bucketName)
}
and:
meta.setContentLength(message.length)
meta.addUserMetadata("userdata", "test123")
- def upload = transfer.upload("test", "test", new ByteArrayInputStream("Test".getBytes(Charsets.UTF_8)), meta)
+ def upload = transfer.upload(bucketName, key, new ByteArrayInputStream(message), meta)
upload.waitForUploadResult()
def content = new String(
- ByteStreams.toByteArray(client.getObject("test", "test").getObjectContent()),
+ ByteStreams.toByteArray(client.getObject(bucketName, key).getObjectContent()),
Charsets.UTF_8)
- def userdata = client.getObjectMetadata("test", "test").getUserMetaDataOf("userdata")
+ def userdata = client.getObjectMetadata(bucketName, key).getUserMetaDataOf("userdata")
then:
content == "Test"
userdata == "test123"
}

def "MultipartUpload and then DELETE work as expected"() {
- when:
+ given:
+ def bucketName = DEFAULT_BUCKET_NAME
+ def key = DEFAULT_KEY
def client = getClient()
+ when:
def transfer = TransferManagerBuilder.standard().
withS3Client(client).
withMultipartUploadThreshold(1).
withMinimumUploadPartSize(1).build()
def meta = new ObjectMetadata()
def message = "Test".getBytes(Charsets.UTF_8)
and:
- if (!getClient().doesBucketExist("test")) {
- getClient().createBucket("test")
+ if (!client.doesBucketExist(bucketName)) {
+ client.createBucket(bucketName)
}
and:
meta.setContentLength(message.length)
- def upload = transfer.upload("test", "test", new ByteArrayInputStream("Test".getBytes(Charsets.UTF_8)), meta)
+ def upload = transfer.upload(bucketName, key, new ByteArrayInputStream(message), meta)
upload.waitForUploadResult()
- client.deleteBucket("test")
- client.getObject("test", "test")
+ client.deleteBucket(bucketName)
+ client.getObject(bucketName, key)
then:
AmazonS3Exception e = thrown()
e.statusCode == 404
}

def "PUT on presigned URL without signed chunks works as expected"() {
given:
+ def bucketName = DEFAULT_BUCKET_NAME
+ def key = DEFAULT_KEY
def client = getClient()
when:
- if (!client.doesBucketExist("test")) {
- client.createBucket("test")
+ if (!client.doesBucketExist(bucketName)) {
+ client.createBucket(bucketName)
}
and:
def content = "NotSigned"
and:
- GeneratePresignedUrlRequest putRequest = new GeneratePresignedUrlRequest("test", "test", HttpMethod.PUT)
+ GeneratePresignedUrlRequest putRequest = new GeneratePresignedUrlRequest(bucketName, key, HttpMethod.PUT)
HttpURLConnection hc = new URL(getClient().generatePresignedUrl(putRequest).toString()).openConnection()
hc.setDoOutput(true)
hc.setRequestMethod("PUT")
@@ -247,7 +274,7 @@ abstract class BaseAWSSpec extends BaseSpecification {
}
hc.getResponseCode()
and:
- GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest("test", "test")
+ GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucketName, key)
URLConnection c = new URL(getClient().generatePresignedUrl(request).toString()).openConnection()
and:
String downloadedData = new String(ByteStreams.toByteArray(c.getInputStream()), Charsets.UTF_8)
@@ -258,22 +285,24 @@ abstract class BaseAWSSpec extends BaseSpecification {
// reported in https://github.com/scireum/s3ninja/issues/153
def "PUT and then GET on presigned URL with ResponseHeaderOverrides works as expected"() {
given:
+ def bucketName = DEFAULT_BUCKET_NAME
+ def key = DEFAULT_KEY
def client = getClient()
when:
- if (!client.doesBucketExist("test")) {
- client.createBucket("test")
+ if (!client.doesBucketExist(bucketName)) {
+ client.createBucket(bucketName)
}
and:
client.putObject(
- "test",
- "test",
+ bucketName,
+ key,
new ByteArrayInputStream("Test".getBytes(Charsets.UTF_8)),
new ObjectMetadata())
def content = new String(
- ByteStreams.toByteArray(client.getObject("test", "test").getObjectContent()),
+ ByteStreams.toByteArray(client.getObject(bucketName, key).getObjectContent()),
Charsets.UTF_8)
and:
- GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest("test", "test")
+ GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucketName, key)
.withExpiration(Date.from(Instant.now().plus(1, ChronoUnit.HOURS)))
.withResponseHeaders(
new ResponseHeaderOverrides()
