Add support for B2 provider

Fixes #98.
pull/139/merge
Andrew Gaul 2016-05-27 16:35:56 -07:00
parent 1d42879316
commit 17f8fae37b
4 changed files with 118 additions and 21 deletions

pom.xml

@@ -329,6 +329,11 @@
<artifactId>filesystem</artifactId>
<version>${jclouds.version}</version>
</dependency>
<dependency>
<groupId>org.apache.jclouds.labs</groupId>
<artifactId>b2</artifactId>
<version>${jclouds.version}</version>
</dependency>
<dependency>
<groupId>org.apache.jclouds.driver</groupId>
<artifactId>jclouds-slf4j</artifactId>
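The jclouds-labs b2 artifact added above is what makes the "b2" provider id used throughout this change resolvable at runtime. As a minimal sketch of what that enables (not part of this commit; the class name and the accountId/applicationKey credentials are placeholders), a B2-backed BlobStore can be built directly through jclouds:

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;

public final class B2Example {
    public static void main(String[] args) throws Exception {
        // Placeholder credentials: B2 authenticates with an account id and
        // an application key.
        try (BlobStoreContext context = ContextBuilder.newBuilder("b2")
                .credentials("accountId", "applicationKey")
                .buildView(BlobStoreContext.class)) {
            BlobStore blobStore = context.getBlobStore();
            // List the buckets visible to this account.
            System.out.println(blobStore.list());
        }
    }
}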

Quirks.java

@@ -24,6 +24,7 @@ final class Quirks {
/** Blobstores which do not support blob-level access control. */
static final Set<String> NO_BLOB_ACCESS_CONTROL = ImmutableSet.of(
"azureblob",
"b2",
"rackspace-cloudfiles-uk",
"rackspace-cloudfiles-us",
"openstack-swift"
@@ -32,19 +33,27 @@ final class Quirks {
/** Blobstores which do not support the Cache-Control header. */
static final Set<String> NO_CACHE_CONTROL_SUPPORT = ImmutableSet.of(
"atmos",
"b2",
"google-cloud-storage",
"rackspace-cloudfiles-uk",
"rackspace-cloudfiles-us",
"openstack-swift"
);
/** Blobstores which do not support the Content-Disposition header. */
static final Set<String> NO_CONTENT_DISPOSITION = ImmutableSet.of(
"b2"
);
/** Blobstores which do not support the Content-Encoding header. */
static final Set<String> NO_CONTENT_ENCODING = ImmutableSet.of(
"b2",
"google-cloud-storage"
);
/** Blobstores which do not support the Content-Language header. */
static final Set<String> NO_CONTENT_LANGUAGE = ImmutableSet.of(
"b2",
"rackspace-cloudfiles-uk",
"rackspace-cloudfiles-us",
"openstack-swift"
@@ -80,12 +89,15 @@ final class Quirks {
/** Blobstores with opaque ETags. */
static final Set<String> OPAQUE_ETAG = ImmutableSet.of(
"azureblob",
"b2",
"google-cloud-storage"
);
/** Blobstores with opaque markers. */
static final Set<String> OPAQUE_MARKERS = ImmutableSet.of(
"azureblob",
// S3 marker means one past this token while B2 means this token
"b2",
"google-cloud-storage"
);
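These quirk sets are consulted wherever the proxy or its tests must skip behavior a backend cannot honor, as the S3ProxyTest changes further down illustrate. A small illustrative sketch of that gating pattern (the helper class and method names are invented here, not part of the commit; it assumes it lives in the same package as the package-private Quirks class):

import org.jclouds.blobstore.domain.BlobBuilder;

final class QuirkAwareHeaders {
    // Illustrative only: set Content-Disposition only when the backend,
    // unlike B2, actually persists it.
    static void maybeSetContentDisposition(
            BlobBuilder.PayloadBlobBuilder builder, String blobStoreType,
            String contentDisposition) {
        if (contentDisposition != null &&
                !Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
            builder.contentDisposition(contentDisposition);
        }
    }

    private QuirkAwareHeaders() {
    }
}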

S3ProxyHandler.java

@@ -77,6 +77,7 @@ import com.google.common.hash.HashingInputStream;
import com.google.common.io.BaseEncoding;
import com.google.common.io.ByteSource;
import com.google.common.io.ByteStreams;
import com.google.common.io.FileBackedOutputStream;
import com.google.common.net.HostAndPort;
import com.google.common.net.HttpHeaders;
import com.google.common.net.PercentEscaper;
@@ -183,6 +184,8 @@ final class S3ProxyHandler extends AbstractHandler {
);
private static final PercentEscaper AWS_URL_PARAMETER_ESCAPER =
new PercentEscaper("-_.~", false);
// TODO: configurable fileThreshold
private static final int B2_PUT_BLOB_BUFFER_SIZE = 1024 * 1024;
private final boolean anonymousIdentity;
private final Optional<String> virtualHost;
@@ -1120,9 +1123,21 @@ final class S3ProxyHandler extends AbstractHandler {
if (!blobStore.containerExists(containerName)) {
throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);
}
String blobStoreType = getBlobStoreType(blobStore);
if (blobStoreType.equals("b2")) {
// S3 allows deleting a container with in-progress MPU while B2 does
// not. Explicitly cancel uploads for B2.
for (MultipartUpload mpu : blobStore.listMultipartUploads(
containerName)) {
blobStore.abortMultipartUpload(mpu);
}
}
if (!blobStore.deleteContainerIfEmpty(containerName)) {
throw new S3Exception(S3ErrorCode.BUCKET_NOT_EMPTY);
}
response.setStatus(HttpServletResponse.SC_NO_CONTENT);
}
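The loop above exists because B2 refuses to delete a bucket that still has unfinished large-file uploads, whereas S3 allows it; cancelling them first preserves the S3 semantics the proxy exposes. The same idea as a standalone sketch (the helper class and method names are invented for illustration):

import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.domain.MultipartUpload;

final class B2Cleanup {
    // Illustrative only: abort any in-progress multipart uploads so that a
    // subsequent deleteContainerIfEmpty behaves on B2 as it does on S3.
    static void abortInProgressUploads(BlobStore blobStore,
            String containerName) {
        for (MultipartUpload mpu :
                blobStore.listMultipartUploads(containerName)) {
            blobStore.abortMultipartUpload(mpu);
        }
    }

    private B2Cleanup() {
    }
}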
@@ -1618,15 +1633,6 @@ final class S3ProxyHandler extends AbstractHandler {
return;
}
BlobBuilder.PayloadBlobBuilder builder = blobStore
.blobBuilder(blobName)
.payload(is)
.contentLength(contentLength);
addContentMetdataFromHttpRequest(builder, request);
if (contentMD5 != null) {
builder = builder.contentMD5(contentMD5);
}
PutOptions options = new PutOptions().setBlobAccess(access);
String blobStoreType = getBlobStoreType(blobStore);
@@ -1634,8 +1640,30 @@ final class S3ProxyHandler extends AbstractHandler {
contentLength > 64 * 1024 * 1024) {
options.multipart(true);
}
FileBackedOutputStream fbos = null;
String eTag;
try {
BlobBuilder.PayloadBlobBuilder builder;
if (blobStoreType.equals("b2")) {
// B2 requires a repeatable payload to calculate the SHA1 hash
fbos = new FileBackedOutputStream(B2_PUT_BLOB_BUFFER_SIZE);
ByteStreams.copy(is, fbos);
fbos.close();
builder = blobStore.blobBuilder(blobName)
.payload(fbos.asByteSource());
} else {
builder = blobStore.blobBuilder(blobName)
.payload(is);
}
builder.contentLength(contentLength);
addContentMetdataFromHttpRequest(builder, request);
if (contentMD5 != null) {
builder = builder.contentMD5(contentMD5);
}
eTag = blobStore.putBlob(containerName, builder.build(),
options);
} catch (HttpResponseException hre) {
@@ -1661,6 +1689,10 @@ final class S3ProxyHandler extends AbstractHandler {
} else {
throw re;
}
} finally {
if (fbos != null) {
fbos.reset();
}
}
response.addHeader(HttpHeaders.ETAG, maybeQuoteETag(eTag));
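The FileBackedOutputStream dance above exists because the B2 provider needs a repeatable payload to calculate the SHA-1 hash it sends with the upload, so a one-shot request stream is first copied into a buffer that keeps small payloads in memory and spills larger ones to a temporary file. Extracted as a hypothetical helper (the class, method, and constant names are invented; the threshold mirrors B2_PUT_BLOB_BUFFER_SIZE):

import java.io.IOException;
import java.io.InputStream;

import com.google.common.io.ByteStreams;
import com.google.common.io.FileBackedOutputStream;

final class RepeatablePayloads {
    // Mirrors B2_PUT_BLOB_BUFFER_SIZE: payloads up to 1 MB stay in memory,
    // larger ones spill to a temporary file.
    private static final int FILE_THRESHOLD = 1024 * 1024;

    // Illustrative only: buffer a one-shot stream into a repeatable source.
    // Read it back via asByteSource(); call reset() in a finally block, as
    // this change does, to delete any temporary file.
    static FileBackedOutputStream buffer(InputStream is) throws IOException {
        FileBackedOutputStream fbos =
                new FileBackedOutputStream(FILE_THRESHOLD);
        ByteStreams.copy(is, fbos);
        fbos.close();
        return fbos;
    }

    private RepeatablePayloads() {
    }
}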
@@ -2123,8 +2155,10 @@ final class S3ProxyHandler extends AbstractHandler {
long contentLength =
blobMetadata.getContentMetadata().getContentLength();
String blobStoreType = getBlobStoreType(blobStore);
FileBackedOutputStream fbos = null;
try (InputStream is = blob.getPayload().openStream()) {
if (getBlobStoreType(blobStore).equals("azureblob")) {
if (blobStoreType.equals("azureblob")) {
// Azure has a maximum part size of 4 MB while S3 has a minimum
// part size of 5 MB and a maximum of 5 GB. Split a single S3
// part into multiple Azure parts.
@@ -2146,13 +2180,29 @@ final class S3ProxyHandler extends AbstractHandler {
eTag = BaseEncoding.base16().lowerCase().encode(
his.hash().asBytes());
} else {
Payload payload = Payloads.newInputStreamPayload(is);
Payload payload;
if (blobStoreType.equals("b2")) {
// B2 requires a repeatable payload to calculate the SHA1
// hash
fbos = new FileBackedOutputStream(B2_PUT_BLOB_BUFFER_SIZE);
ByteStreams.copy(is, fbos);
fbos.close();
payload = Payloads.newByteSourcePayload(
fbos.asByteSource());
} else {
payload = Payloads.newInputStreamPayload(is);
}
payload.getContentMetadata().setContentLength(contentLength);
MultipartPart part = blobStore.uploadMultipartPart(mpu,
partNumber, payload);
eTag = part.partETag();
}
} finally {
if (fbos != null) {
fbos.reset();
}
}
try (Writer writer = response.getWriter()) {
@@ -2276,14 +2326,34 @@ final class S3ProxyHandler extends AbstractHandler {
BaseEncoding.base16().lowerCase().encode(
his.hash().asBytes())));
} else {
Payload payload = Payloads.newInputStreamPayload(is);
payload.getContentMetadata().setContentLength(contentLength);
if (contentMD5 != null) {
payload.getContentMetadata().setContentMD5(contentMD5);
MultipartPart part;
Payload payload;
FileBackedOutputStream fbos = null;
try {
String blobStoreType = getBlobStoreType(blobStore);
if (blobStoreType.equals("b2")) {
// B2 requires a repeatable payload to calculate the SHA1
// hash
fbos = new FileBackedOutputStream(B2_PUT_BLOB_BUFFER_SIZE);
ByteStreams.copy(is, fbos);
fbos.close();
payload = Payloads.newByteSourcePayload(
fbos.asByteSource());
} else {
payload = Payloads.newInputStreamPayload(is);
}
payload.getContentMetadata().setContentLength(contentLength);
if (contentMD5 != null) {
payload.getContentMetadata().setContentMD5(contentMD5);
}
part = blobStore.uploadMultipartPart(mpu, partNumber, payload);
} finally {
if (fbos != null) {
fbos.reset();
}
}
MultipartPart part = blobStore.uploadMultipartPart(mpu,
partNumber, payload);
if (part.partETag() != null) {
response.addHeader(HttpHeaders.ETAG,
maybeQuoteETag(part.partETag()));

S3ProxyTest.java

@@ -370,6 +370,9 @@ public final class S3ProxyTest {
cacheControl = null;
}
String contentDisposition = "attachment; filename=new.jpg";
if (Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
contentDisposition = null;
}
String contentEncoding = "gzip";
if (Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {
contentEncoding = null;
@@ -407,8 +410,10 @@ public final class S3ProxyTest {
assertThat(newContentMetadata.getCacheControl()).isEqualTo(
cacheControl);
}
assertThat(newContentMetadata.getContentDisposition()).isEqualTo(
contentDisposition);
if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
assertThat(newContentMetadata.getContentDisposition()).isEqualTo(
contentDisposition);
}
if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {
assertThat(newContentMetadata.getContentEncoding()).isEqualTo(
contentEncoding);
@@ -433,6 +438,9 @@ public final class S3ProxyTest {
cacheControl = null;
}
String contentDisposition = "attachment; filename=new.jpg";
if (Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
contentDisposition = null;
}
String contentEncoding = "gzip";
if (Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {
contentEncoding = null;
@@ -487,8 +495,10 @@ public final class S3ProxyTest {
assertThat(newContentMetadata.getCacheControl()).isEqualTo(
cacheControl);
}
assertThat(newContentMetadata.getContentDisposition()).isEqualTo(
contentDisposition);
if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
assertThat(newContentMetadata.getContentDisposition()).isEqualTo(
contentDisposition);
}
if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {
assertThat(newContentMetadata.getContentEncoding()).isEqualTo(
contentEncoding);