From 19807d0c2a6769b18359faab9cae46a83cf13590 Mon Sep 17 00:00:00 2001
From: Andrew Gaul
Date: Sun, 15 Oct 2017 13:39:27 -0700
Subject: [PATCH] Add support for storage classes

Supported for head, get, put, multipart put, and list objects.  Not
supported for list multipart uploads and copy objects.  Fixes #234.
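As an illustration of the client-visible behavior, a PUT with an
explicit storage class now round-trips through HEAD (a sketch against
a local S3Proxy; the endpoint, credentials, bucket name, and class
name are illustrative only, not part of this patch):

    import java.io.ByteArrayInputStream;

    import com.amazonaws.auth.AWSStaticCredentialsProvider;
    import com.amazonaws.auth.BasicAWSCredentials;
    import com.amazonaws.client.builder.AwsClientBuilder;
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;
    import com.amazonaws.services.s3.model.ObjectMetadata;
    import com.amazonaws.services.s3.model.PutObjectRequest;

    public final class StorageClassExample {
        public static void main(String[] args) throws Exception {
            // Point the AWS SDK for Java at a local S3Proxy instance.
            AmazonS3 client = AmazonS3ClientBuilder.standard()
                    .withCredentials(new AWSStaticCredentialsProvider(
                            new BasicAWSCredentials("identity",
                                    "credential")))
                    .withEndpointConfiguration(
                            new AwsClientBuilder.EndpointConfiguration(
                                    "http://127.0.0.1:8080", "us-east-1"))
                    .withPathStyleAccessEnabled(true)
                    .build();
            client.createBucket("bucket");

            byte[] payload = new byte[] {1, 2, 3, 4};
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(payload.length);

            // PUT with an explicit storage class; the handler maps it
            // to a jclouds Tier via StorageClass.valueOf(...).toTier().
            client.putObject(new PutObjectRequest("bucket", "blob",
                    new ByteArrayInputStream(payload), metadata)
                    .withStorageClass("STANDARD_IA"));

            // HEAD object now echoes x-amz-storage-class back.
            System.out.println(client.getObjectMetadata("bucket", "blob")
                    .getStorageClass());
        }
    }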
---
 README.md                                     |  1 -
 .../java/org/gaul/s3proxy/S3ProxyHandler.java | 47 +++++++++++++++----
 .../java/org/gaul/s3proxy/AwsSdkTest.java     | 17 ++++++-
 3 files changed, 55 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index 45fdbb8..b5c11f3 100644
--- a/README.md
+++ b/README.md
@@ -112,7 +112,6 @@ S3Proxy has broad compatibility with the S3 API, however, it does not support:
 * POST upload policies, see [#73](https://github.com/gaul/s3proxy/issues/73)
 * requester pays buckets
 * [select object content](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html)
-* storage classes, see [#234](https://github.com/gaul/s3proxy/issues/234)
 
 S3Proxy emulates the following operations:
 
diff --git a/src/main/java/org/gaul/s3proxy/S3ProxyHandler.java b/src/main/java/org/gaul/s3proxy/S3ProxyHandler.java
index 1950566..ae3d402 100644
--- a/src/main/java/org/gaul/s3proxy/S3ProxyHandler.java
+++ b/src/main/java/org/gaul/s3proxy/S3ProxyHandler.java
@@ -92,6 +92,7 @@ import org.jclouds.blobstore.domain.MultipartPart;
 import org.jclouds.blobstore.domain.MultipartUpload;
 import org.jclouds.blobstore.domain.PageSet;
 import org.jclouds.blobstore.domain.StorageMetadata;
+import org.jclouds.blobstore.domain.Tier;
 import org.jclouds.blobstore.domain.internal.MutableBlobMetadataImpl;
 import org.jclouds.blobstore.options.CopyOptions;
 import org.jclouds.blobstore.options.CreateContainerOptions;
@@ -104,6 +105,7 @@ import org.jclouds.io.ContentMetadataBuilder;
 import org.jclouds.io.Payload;
 import org.jclouds.io.Payloads;
 import org.jclouds.rest.AuthorizationException;
+import org.jclouds.s3.domain.ObjectMetadata.StorageClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -167,7 +169,8 @@ public class S3ProxyHandler {
             "x-amz-copy-source-range",
             "x-amz-date",
             "x-amz-decoded-content-length",
-            "x-amz-metadata-directive"
+            "x-amz-metadata-directive",
+            "x-amz-storage-class"
     );
     private static final Set<String> CANNED_ACLS = ImmutableSet.of(
             "private",
@@ -606,10 +609,6 @@ public class S3ProxyHandler {
             if (headerName.startsWith("x-amz-meta-")) {
                 continue;
             }
-            if (headerName.equals("x-amz-storage-class") &&
-                    request.getHeader(headerName).equals("STANDARD")) {
-                continue;
-            }
             if (!SUPPORTED_X_AMZ_HEADERS.contains(headerName.toLowerCase())) {
                 logger.error("Unknown header {} with URI {}",
                         headerName, request.getRequestURI());
@@ -1148,6 +1147,7 @@ public class S3ProxyHandler {
             writeSimpleElement(xml, "UploadId", upload.id());
             writeInitiatorStanza(xml);
             writeOwnerStanza(xml);
+            // TODO: bogus value
             writeSimpleElement(xml, "StorageClass", "STANDARD");
 
             // TODO: bogus value
@@ -1398,7 +1398,8 @@ public class S3ProxyHandler {
                 writeSimpleElement(xml, "Size",
                         String.valueOf(metadata.getSize()));
-                writeSimpleElement(xml, "StorageClass", "STANDARD");
+                writeSimpleElement(xml, "StorageClass",
+                        StorageClass.fromTier(metadata.getTier()).toString());
 
                 writeOwnerStanza(xml);
@@ -1787,6 +1788,15 @@ public class S3ProxyHandler {
                 .payload(is)
                 .contentLength(contentLength);
 
+        String storageClass = request.getHeader("x-amz-storage-class");
+        if (storageClass == null || storageClass.equalsIgnoreCase("STANDARD")) {
+            // defaults to STANDARD
+        } else if (storageClass.equalsIgnoreCase("GLACIER")) {
+            throw new IllegalArgumentException();
+        } else {
+            builder.tier(StorageClass.valueOf(storageClass).toTier());
+        }
+
         addContentMetdataFromHttpRequest(builder, request);
         if (contentMD5 != null) {
             builder = builder.contentMD5(contentMD5);
@@ -1975,6 +1985,15 @@ public class S3ProxyHandler {
         addContentMetdataFromHttpRequest(builder, request);
         builder.contentLength(payload.size());
 
+        String storageClass = request.getHeader("x-amz-storage-class");
+        if (storageClass == null || storageClass.equalsIgnoreCase("STANDARD")) {
+            // defaults to STANDARD
+        } else if (storageClass.equalsIgnoreCase("GLACIER")) {
+            throw new IllegalArgumentException();
+        } else {
+            builder.tier(StorageClass.valueOf(storageClass).toTier());
+        }
+
         BlobAccess access;
         String cannedAcl = request.getHeader("x-amz-acl");
         if (cannedAcl == null || cannedAcl.equalsIgnoreCase("private")) {
@@ -2197,6 +2216,7 @@ public class S3ProxyHandler {
             writeSimpleElement(xml, "UploadId", uploadId);
             writeInitiatorStanza(xml);
             writeOwnerStanza(xml);
+            // TODO: bogus value
             writeSimpleElement(xml, "StorageClass", "STANDARD");
 
             // TODO: pagination
@@ -2475,9 +2495,15 @@ public class S3ProxyHandler {
         }
 
         // TODO: how to reconstruct original mpu?
+        BlobMetadata blobMetadata;
+        if (Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType(
+                blobStore))) {
+            blobMetadata = blobStore.blobMetadata(containerName, uploadId);
+        } else {
+            blobMetadata = createFakeBlobMetadata(blobStore);
+        }
         MultipartUpload mpu = MultipartUpload.create(containerName,
-                blobName, uploadId, createFakeBlobMetadata(blobStore),
-                new PutOptions());
+                blobName, uploadId, blobMetadata, new PutOptions());
 
         if (getBlobStoreType(blobStore).equals("azureblob")) {
             // Azure has a maximum part size of 4 MB while S3 has a minimum
@@ -2570,6 +2596,11 @@ public class S3ProxyHandler {
         }
         response.addDateHeader(HttpHeaders.LAST_MODIFIED,
                 metadata.getLastModified().getTime());
+        Tier tier = metadata.getTier();
+        if (tier != null) {
+            response.addHeader("x-amz-storage-class",
+                    StorageClass.fromTier(tier).toString());
+        }
         for (Map.Entry<String, String> entry :
                 metadata.getUserMetadata().entrySet()) {
             response.addHeader(USER_METADATA_PREFIX + entry.getKey(),
diff --git a/src/test/java/org/gaul/s3proxy/AwsSdkTest.java b/src/test/java/org/gaul/s3proxy/AwsSdkTest.java
index 23a446a..45d5ca9 100644
--- a/src/test/java/org/gaul/s3proxy/AwsSdkTest.java
+++ b/src/test/java/org/gaul/s3proxy/AwsSdkTest.java
@@ -75,6 +75,7 @@ import com.amazonaws.services.s3.model.ListPartsRequest;
 import com.amazonaws.services.s3.model.MultipartUploadListing;
 import com.amazonaws.services.s3.model.ObjectListing;
 import com.amazonaws.services.s3.model.ObjectMetadata;
+import com.amazonaws.services.s3.model.ObjectTagging;
 import com.amazonaws.services.s3.model.PartETag;
 import com.amazonaws.services.s3.model.PartListing;
 import com.amazonaws.services.s3.model.Permission;
@@ -84,6 +85,7 @@ import com.amazonaws.services.s3.model.ResponseHeaderOverrides;
 import com.amazonaws.services.s3.model.S3Object;
 import com.amazonaws.services.s3.model.S3ObjectSummary;
 import com.amazonaws.services.s3.model.SetBucketLoggingConfigurationRequest;
+import com.amazonaws.services.s3.model.Tag;
 import com.amazonaws.services.s3.model.UploadPartRequest;
 import com.amazonaws.services.s3.model.UploadPartResult;
 import com.google.common.collect.ImmutableList;
@@ -1366,6 +1368,19 @@ public final class AwsSdkTest {
         assertThat(object).isNull();
     }
 
+    @Test
+    public void testStorageClass() throws Exception {
+        String blobName = "test-storage-class";
+        ObjectMetadata metadata = new ObjectMetadata();
+        metadata.setContentLength(BYTE_SOURCE.size());
+        PutObjectRequest request = new PutObjectRequest(
+                containerName, blobName, BYTE_SOURCE.openStream(), metadata)
+                .withStorageClass("STANDARD_IA");
+        client.putObject(request);
+        metadata = client.getObjectMetadata(containerName, blobName);
+        assertThat(metadata.getStorageClass()).isEqualTo("STANDARD_IA");
+    }
+
     @Test
     public void testUnknownHeader() throws Exception {
         String blobName = "test-unknown-header";
@@ -1373,7 +1388,7 @@ public final class AwsSdkTest {
         metadata.setContentLength(BYTE_SOURCE.size());
         PutObjectRequest request = new PutObjectRequest(
                 containerName, blobName, BYTE_SOURCE.openStream(), metadata)
-                .withStorageClass("REDUCED_REDUNDANCY");
+                .withTagging(new ObjectTagging(ImmutableList.of()));
         try {
             client.putObject(request);
             Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
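The list-objects and GLACIER paths can be exercised the same way (a
sketch continuing main() from the example above; treating the rejected
PUT as an AmazonS3Exception assumes S3Proxy's generic mapping of
IllegalArgumentException to an HTTP 4xx error, which this patch does
not itself change):

    // Additional imports for this fragment:
    // import com.amazonaws.services.s3.model.AmazonS3Exception;
    // import com.amazonaws.services.s3.model.S3ObjectSummary;

    // Each list entry now reports the backend tier translated back
    // into an S3 storage class instead of a hardcoded STANDARD.
    for (S3ObjectSummary summary :
            client.listObjects("bucket").getObjectSummaries()) {
        System.out.println(summary.getKey() + " "
                + summary.getStorageClass());
    }

    // GLACIER is explicitly rejected by the new validation in putBlob.
    try {
        client.putObject(new PutObjectRequest("bucket", "glacier-blob",
                new ByteArrayInputStream(payload), metadata)
                .withStorageClass("GLACIER"));
    } catch (AmazonS3Exception e) {
        System.out.println("rejected: " + e.getStatusCode());
    }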