Initial commit of S3Proxy

pull/16/head
Andrew Gaul 2014-07-21 20:25:31 -07:00
commit ff84ccf19e
11 changed files with 1546 additions and 0 deletions

1
.gitignore vendored 100644

@@ -0,0 +1 @@
target/

202
LICENSE 100644

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

86
README.md 100644

@@ -0,0 +1,86 @@
S3Proxy
=======
S3Proxy allows applications using the S3 API to interface with a variety of
object stores, e.g., EMC Atmos, Microsoft Azure, OpenStack Swift. It runs a
local HTTP server which translates S3 operations into provider-specific
operations. S3Proxy also lets developers test against the S3 API without the
cost or latency of AWS by using the local file system as the backing store.
Features
--------
* create, remove, list containers
* put, get, remove, list blobs (up to 2 GB in size)
* store and retrieve user metadata
Supported object stores:
* atmos
* aws-s3
* azureblob
* cloudfiles-uk and cloudfiles-us
* filesystem (on-disk storage)
* hpcloud-objectstorage
* s3
* swift and swift-keystone
* transient (in-memory storage)
Installation
------------
S3Proxy requires Java 7 to run. There is presently no binary release, but
[Bintray](https://bintray.com/) will host releases in the future. Build the
project with `mvn package`, which produces an executable jar at
`target/s3proxy`.
Examples
--------
Linux and Mac OS users can run S3Proxy either via the executable jar or by
explicitly invoking java:
```
s3proxy --properties s3proxy.conf
java -jar s3proxy --properties s3proxy.conf
```
Windows users must explicitly invoke java.
Configuration
-------------
Users can configure S3Proxy via a properties file. An example:
```
jclouds.provider=transient
jclouds.identity=identity
jclouds.credential=credential
#jclouds.endpoint=http://127.0.0.1:8081 # optional for some providers
jclouds.filesystem.basedir=/tmp/blobstore
s3proxy.endpoint=http://127.0.0.1:8080
```
Users can also set a variety of Java and
[jclouds properties](https://github.com/jclouds/jclouds/blob/master/core/src/main/java/org/jclouds/Constants.java).
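Once the proxy is running, any S3-compatible client can be pointed at
`s3proxy.endpoint`. As a minimal sketch (illustrative only, not part of this
commit), the jclouds `s3` provider can talk to the example endpoint above; the
identity and credential are placeholders since S3Proxy does not yet authorize
clients (see Limitations):
```
import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;

public final class Example {
    public static void main(String[] args) throws Exception {
        // connect a jclouds S3 client to the local S3Proxy endpoint
        BlobStoreContext context = ContextBuilder
                .newBuilder("s3")
                .credentials("identity", "credential")
                .endpoint("http://127.0.0.1:8080")
                .build(BlobStoreContext.class);
        BlobStore blobStore = context.getBlobStore();
        blobStore.createContainerInLocation(null, "container");
        context.close();
    }
}
```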
Limitations
-----------
S3Proxy does not support:
* single-part uploads larger than 2 GB ([upstream issue](https://github.com/jclouds/jclouds/pull/426))
* multi-part uploads
* authorization of clients
* bucket ACLs
* server-side copy
* URL signing
* metadata with filesystem provider ([upstream issue](https://github.com/jclouds/jclouds/pull/443))
* listening on HTTPS
References
----------
[jclouds](http://jclouds.apache.org/) provides the object store support for
S3Proxy. [s3fs-fuse](https://github.com/s3fs-fuse/s3fs-fuse) provides file
system access to S3; combined with S3Proxy, it can also access other object
stores.
License
-------
Copyright (C) 2014 Andrew Gaul
Licensed under the Apache License, Version 2.0

199
pom.xml 100644

@@ -0,0 +1,199 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.sonatype.oss</groupId>
<artifactId>oss-parent</artifactId>
<version>7</version>
</parent>
<groupId>org.gaul</groupId>
<artifactId>s3proxy</artifactId>
<version>0.0.0-SNAPSHOT</version>
<packaging>jar</packaging>
<name>S3 Proxy</name>
<url>https://github.com/andrewgaul/s3proxy</url>
<description>S3Proxy allows applications using the S3 API to interface with a variety of object stores</description>
<licenses>
<license>
<name>The Apache Software License, Version 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:git@github.com:andrewgaul/s3proxy.git</connection>
<developerConnection>scm:git:git@github.com:andrewgaul/s3proxy.git</developerConnection>
<url>git@github.com:andrewgaul/s3proxy.git</url>
</scm>
<developers>
<developer>
<name>Andrew Gaul</name>
<id>gaul</id>
<email>andrew@gaul.org</email>
</developer>
</developers>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.1</version>
<configuration>
<source>1.7</source>
<target>1.7</target>
<showDeprecation>true</showDeprecation>
<showWarnings>true</showWarnings>
<compilerArguments>
<Xlint />
</compilerArguments>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>2.3</version>
<configuration>
<createDependencyReducedPom>false</createDependencyReducedPom>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
<exclude>about.html</exclude>
</excludes>
</filter>
</filters>
<shadedArtifactAttached>true</shadedArtifactAttached>
<transformers>
<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<manifestEntries>
<Main-Class>org.gaul.s3proxy.S3Proxy</Main-Class>
<Implementation-Build>${buildNumber}</Implementation-Build>
<Implementation-Version>${project.version}</Implementation-Version>
<Build-Date>${timestamp}</Build-Date>
</manifestEntries>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer"/>
</transformers>
</configuration>
<executions>
<execution>
<id>jar-with-dependencies</id>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<shadedClassifierName>jar-with-dependencies</shadedClassifierName>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.skife.maven</groupId>
<artifactId>really-executable-jar-maven-plugin</artifactId>
<version>1.3.0</version>
<configuration>
<programFile>s3proxy</programFile>
</configuration>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>really-executable-jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.15</version>
<configuration>
<parallel>classes</parallel>
<threadCount>1</threadCount>
<argLine>-Xmx256m</argLine>
<redirectTestOutputToFile>true</redirectTestOutputToFile>
<forkedProcessTimeoutInSeconds>300</forkedProcessTimeoutInSeconds>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>2.12.1</version>
<configuration>
<configLocation>src/main/resources/checkstyle.xml</configLocation>
<headerLocation>src/main/resources/copyright_header.txt</headerLocation>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<violationSeverity>warning</violationSeverity>
</configuration>
</plugin>
</plugins>
</build>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<!-- TODO: change to release version -->
<jclouds.version>1.8.0-SNAPSHOT</jclouds.version>
<slf4j.version>1.7.7</slf4j.version>
</properties>
<prerequisites>
<maven>3.0.5</maven>
</prerequisites>
<dependencies>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>1.1.2</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.jclouds</groupId>
<artifactId>jclouds-allblobstore</artifactId>
<version>${jclouds.version}</version>
</dependency>
<dependency>
<groupId>org.apache.jclouds.labs</groupId>
<artifactId>glacier</artifactId>
<version>${jclouds.version}</version>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
<version>1.6.1</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-servlet</artifactId>
<version>9.2.2.v20140723</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>${slf4j.version}</version>
</dependency>
</dependencies>
</project>

111
src/main/java/org/gaul/s3proxy/S3Proxy.java 100644

@@ -0,0 +1,111 @@
/*
* Copyright 2014 Andrew Gaul <andrew@gaul.org>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gaul.s3proxy;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.net.URI;
import java.util.Properties;
import com.google.common.base.Preconditions;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.jclouds.Constants;
import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
/**
* S3Proxy translates S3 HTTP operations into jclouds provider-agnostic
* operations. This allows applications using the S3 API to interface with any
* provider that jclouds supports, e.g., EMC Atmos, Microsoft Azure,
* OpenStack Swift.
*/
public final class S3Proxy {
private static final String PROPERTY_S3PROXY_ENDPOINT = "s3proxy.endpoint";
private final Server server;
public S3Proxy(BlobStore blobStore, URI endpoint) {
Preconditions.checkNotNull(blobStore);
Preconditions.checkNotNull(endpoint);
// TODO: allow service paths?
Preconditions.checkArgument(endpoint.getPath().isEmpty(),
"endpoint path must be empty, was: " + endpoint.getPath());
server = new Server();
HttpConnectionFactory httpConnectionFactory =
new HttpConnectionFactory();
ServerConnector connector = new ServerConnector(server,
httpConnectionFactory);
connector.setHost(endpoint.getHost());
connector.setPort(endpoint.getPort());
server.addConnector(connector);
server.setHandler(new S3ProxyHandler(blobStore));
}
public void start() throws Exception {
server.start();
}
public void stop() throws Exception {
server.stop();
}
public static void main(String[] args) throws Exception {
if (args.length != 2) {
System.err.println("Usage: s3proxy --properties FILE");
System.exit(1);
}
Properties properties = new Properties();
try (InputStream is = new FileInputStream(new File(args[1]))) {
properties.load(is);
}
String provider = properties.getProperty(Constants.PROPERTY_PROVIDER);
String identity = properties.getProperty(Constants.PROPERTY_IDENTITY);
String credential = properties.getProperty(
Constants.PROPERTY_CREDENTIAL);
String endpoint = properties.getProperty(Constants.PROPERTY_ENDPOINT);
String s3ProxyEndpointString = properties.getProperty(
PROPERTY_S3PROXY_ENDPOINT);
if (provider == null || identity == null || credential == null
|| s3ProxyEndpointString == null) {
System.err.println("Properties file must contain:\n" +
Constants.PROPERTY_PROVIDER + "\n" +
Constants.PROPERTY_IDENTITY + "\n" +
Constants.PROPERTY_CREDENTIAL + "\n" +
PROPERTY_S3PROXY_ENDPOINT);
System.exit(1);
}
ContextBuilder builder = ContextBuilder
.newBuilder(provider)
.credentials(identity, credential)
.overrides(properties);
if (endpoint != null) {
builder = builder.endpoint(endpoint);
}
BlobStoreContext context = builder.build(BlobStoreContext.class);
URI s3ProxyEndpoint = new URI(s3ProxyEndpointString);
S3Proxy s3Proxy = new S3Proxy(context.getBlobStore(), s3ProxyEndpoint);
s3Proxy.start();
}
}
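A minimal embedding sketch, distilled from the main method above; the
filesystem provider and basedir are assumptions borrowed from the example
configuration, and the imports are the same ones S3Proxy.java already uses:
```
// embed S3Proxy programmatically, mirroring main() above
Properties properties = new Properties();
properties.setProperty("jclouds.filesystem.basedir", "/tmp/blobstore");
BlobStoreContext context = ContextBuilder
        .newBuilder("filesystem")
        .credentials("identity", "credential")
        .overrides(properties)
        .build(BlobStoreContext.class);
S3Proxy s3Proxy = new S3Proxy(context.getBlobStore(),
        URI.create("http://127.0.0.1:8080"));
s3Proxy.start();
// ... serve requests ...
s3Proxy.stop();
context.close();
```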

551
src/main/java/org/gaul/s3proxy/S3ProxyHandler.java 100644

@@ -0,0 +1,551 @@
/*
* Copyright 2014 Andrew Gaul <andrew@gaul.org>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gaul.s3proxy;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Writer;
import java.util.Date;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.hash.HashCode;
import com.google.common.io.BaseEncoding;
import com.google.common.io.ByteStreams;
import com.google.common.net.HttpHeaders;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.ContainerNotFoundException;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.domain.BlobBuilder;
import org.jclouds.blobstore.domain.BlobMetadata;
import org.jclouds.blobstore.domain.PageSet;
import org.jclouds.blobstore.domain.StorageMetadata;
import org.jclouds.blobstore.domain.StorageType;
import org.jclouds.blobstore.options.GetOptions;
import org.jclouds.blobstore.options.ListContainerOptions;
import org.jclouds.domain.Location;
import org.jclouds.http.HttpResponseException;
import org.jclouds.io.ContentMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
final class S3ProxyHandler extends AbstractHandler {
private static final Logger logger = LoggerFactory.getLogger(
S3ProxyHandler.class);
// TODO: support configurable metadata prefix
private static final String USER_METADATA_PREFIX = "x-amz-meta-";
// TODO: fake owner
private static final String FAKE_OWNER_ID =
"75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a";
private static final String FAKE_OWNER_DISPLAY_NAME =
"CustomersName@amazon.com";
private final BlobStore blobStore;
S3ProxyHandler(BlobStore blobStore) {
this.blobStore = Preconditions.checkNotNull(blobStore);
}
@Override
public void handle(String target, Request baseRequest,
HttpServletRequest request, HttpServletResponse response)
throws IOException {
int errorCode;
String method = request.getMethod();
String uri = request.getRequestURI();
logger.debug("request: {}", request);
switch (method) {
case "DELETE":
if (uri.lastIndexOf("/") == 0) {
errorCode = handleContainerDelete(uri.substring(1));
if (errorCode != HttpServletResponse.SC_OK) {
response.sendError(errorCode);
}
baseRequest.setHandled(true);
return;
} else {
String[] path = uri.split("/", 3);
errorCode = handleBlobRemove(response, path[1], path[2]);
if (errorCode != HttpServletResponse.SC_OK) {
response.sendError(errorCode);
}
baseRequest.setHandled(true);
return;
}
case "GET":
if (uri.equals("/")) {
errorCode = handleContainerList(response);
if (errorCode != HttpServletResponse.SC_OK) {
response.sendError(errorCode);
}
baseRequest.setHandled(true);
return;
} else if (uri.lastIndexOf("/") == 0 &&
"".equals(request.getParameter("acl"))) {
errorCode = handleContainerAcl(response, uri.substring(1));
if (errorCode != HttpServletResponse.SC_OK) {
response.sendError(errorCode);
}
baseRequest.setHandled(true);
return;
} else if (uri.lastIndexOf("/") == 0 &&
"0".equals(request.getParameter("max-keys"))) {
errorCode = handleContainerExists(uri.substring(1));
if (errorCode != HttpServletResponse.SC_OK) {
response.sendError(errorCode);
}
baseRequest.setHandled(true);
return;
} else if (uri.lastIndexOf("/") == 0) {
handleBlobList(request, response, uri.substring(1));
baseRequest.setHandled(true);
return;
} else if (uri.lastIndexOf("/") == uri.length() - 1) {
handleBlobList(request, response, uri.substring(1,
uri.length() - 1));
baseRequest.setHandled(true);
return;
} else {
String[] path = uri.split("/", 3);
errorCode = handleGetBlob(request, response, path[1], path[2]);
if (errorCode != HttpServletResponse.SC_OK) {
response.sendError(errorCode);
}
baseRequest.setHandled(true);
return;
}
case "HEAD":
if (uri.lastIndexOf("/") == 0) {
errorCode = handleContainerExists(uri.substring(1));
if (errorCode != HttpServletResponse.SC_OK) {
response.sendError(errorCode);
}
baseRequest.setHandled(true);
return;
} else {
String[] path = uri.split("/", 3);
handleBlobMetadata(response, path[1], path[2]);
baseRequest.setHandled(true);
return;
}
case "PUT":
if (uri.lastIndexOf("/") == 0) {
handleContainerCreate(response, uri.substring(1));
baseRequest.setHandled(true);
return;
} else {
String[] path = uri.split("/", 3);
errorCode = handlePutBlob(request, response, path[1], path[2]);
if (errorCode != HttpServletResponse.SC_OK) {
response.sendError(errorCode);
}
baseRequest.setHandled(true);
return;
}
default:
logger.error("Unknown method {} with URI {}",
method, request.getRequestURI());
response.sendError(HttpServletResponse.SC_NOT_IMPLEMENTED);
baseRequest.setHandled(true);
return;
}
}
private int handleContainerAcl(HttpServletResponse response,
String containerName) {
try (Writer writer = response.getWriter()) {
writer.write("<AccessControlPolicy>\r\n" +
" <Owner>\r\n" +
" <ID>" + FAKE_OWNER_ID + "</ID>\r\n" +
" <DisplayName>" + FAKE_OWNER_DISPLAY_NAME + "</DisplayName>\r\n" +
" </Owner>\r\n" +
" <AccessControlList>\r\n" +
" <Grant>\r\n" +
" <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\r\n" +
" xsi:type=\"CanonicalUser\">\r\n" +
" <ID>" + FAKE_OWNER_ID + "</ID>\r\n" +
" <DisplayName>" + FAKE_OWNER_DISPLAY_NAME + "</DisplayName>\r\n" +
" </Grantee>\r\n" +
" <Permission>FULL_CONTROL</Permission>\r\n" +
" </Grant>\r\n" +
" </AccessControlList>\r\n" +
"</AccessControlPolicy>\r\n");
writer.flush();
} catch (IOException ioe) {
logger.error("Error writing to client: {}", ioe.getMessage());
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
return HttpServletResponse.SC_OK;
}
private int handleContainerList(HttpServletResponse response) {
try (Writer writer = response.getWriter()) {
writer.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" +
"<ListAllMyBucketsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\r\n" +
" <Owner>\r\n" +
" <ID>" + FAKE_OWNER_ID + "</ID>\r\n" +
" <DisplayName>" + FAKE_OWNER_DISPLAY_NAME + "</DisplayName>\r\n" +
" </Owner>\r\n" +
" <Buckets>");
for (StorageMetadata metadata : blobStore.list()) {
writer.write("<Bucket>\r\n" +
" <Name>");
writer.write(metadata.getName());
writer.write("</Name>\r\n");
Date creationDate = metadata.getCreationDate();
if (creationDate != null) {
writer.write(" <CreationDate>");
writer.write(blobStore.getContext().utils().date()
.iso8601DateFormat(creationDate));
writer.write("</CreationDate>\r\n");
}
writer.write("</Bucket>\r\n");
}
writer.write(" </Buckets>\r\n" +
"</ListAllMyBucketsResult>\r\n");
writer.flush();
} catch (IOException ioe) {
logger.error("Error writing to client: {}", ioe.getMessage());
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
return HttpServletResponse.SC_OK;
}
private int handleContainerExists(String containerName) {
try {
return blobStore.containerExists(containerName)
? HttpServletResponse.SC_OK
: HttpServletResponse.SC_NOT_FOUND;
} catch (RuntimeException re) {
logger.error("Error determining container existence: {}",
re.getMessage());
return HttpServletResponse.SC_FORBIDDEN;
}
}
private void handleContainerCreate(HttpServletResponse response,
String containerName) {
try {
// TODO: how to support locations?
Location location = null;
if (blobStore.createContainerInLocation(location, containerName)) {
return;
}
try (Writer writer = response.getWriter()) {
response.setStatus(HttpServletResponse.SC_CONFLICT);
writer.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" +
"<Error>\r\n" +
" <Code>BucketAlreadyOwnedByYou</Code>\r\n" +
" <Message>Your previous request to create the named bucket succeeded and you already own it.</Message>\r\n" +
" <BucketName>");
writer.write(containerName);
writer.write("</BucketName>\r\n" +
// TODO: RequestId
" <RequestId>4442587FB7D0A2F9</RequestId>\r\n" +
"</Error>\r\n");
writer.flush();
} catch (IOException ioe) {
logger.error("Error writing to client: {}",
ioe.getMessage());
response.setStatus(
HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
}
} catch (RuntimeException re) {
logger.error("Error creating container: {}", re.getMessage());
response.setStatus(HttpServletResponse.SC_FORBIDDEN);
}
}
private int handleContainerDelete(String containerName) {
try {
return blobStore.deleteContainerIfEmpty(containerName)
? HttpServletResponse.SC_OK
// TODO: emit BucketNotEmpty error code?
: HttpServletResponse.SC_CONFLICT;
} catch (RuntimeException re) {
logger.error("Error deleting container {}: {}", containerName,
re.getMessage());
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
}
private void handleBlobList(HttpServletRequest request,
HttpServletResponse response, String containerName) {
ListContainerOptions options = new ListContainerOptions();
String delimiter = request.getParameter("delimiter");
if (!(delimiter != null && delimiter.equals("/"))) {
options = options.recursive();
}
String prefix = request.getParameter("prefix");
if (prefix != null) {
options = options.inDirectory(prefix);
}
String marker = request.getParameter("marker");
if (marker != null) {
options = options.afterMarker(marker);
}
String maxKeys = request.getParameter("max-keys");
if (maxKeys != null) {
options = options.maxResults(Integer.valueOf(maxKeys));
}
PageSet<? extends StorageMetadata> set;
try {
set = blobStore.list(containerName, options);
} catch (ContainerNotFoundException cnfe) {
response.setStatus(HttpServletResponse.SC_NOT_FOUND);
return;
}
try (Writer writer = response.getWriter()) {
response.setStatus(HttpServletResponse.SC_OK);
writer.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" +
"<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\r\n" +
" <Name>");
writer.write(containerName);
writer.write("</Name>\r\n");
if (prefix == null) {
writer.write(" <Prefix/>\r\n");
} else {
writer.write(" <Prefix>");
writer.write(prefix);
writer.write("</Prefix>\r\n");
}
writer.write(" <MaxKeys>");
writer.write(String.valueOf(set.size()));
writer.write("</MaxKeys>\r\n");
if (marker == null) {
writer.write(" <Marker/>\r\n");
} else {
writer.write(" <Marker>");
writer.write(marker);
writer.write("</Marker>\r\n");
}
if (delimiter != null) {
writer.write(" <Delimiter>");
writer.write(delimiter);
writer.write("</Delimiter>\r\n");
}
String nextMarker = set.getNextMarker();
if (nextMarker != null) {
writer.write(" <IsTruncated>true</IsTruncated>\r\n" +
" <NextMarker>");
writer.write(nextMarker);
writer.write("</NextMarker>\r\n");
} else {
writer.write(" <IsTruncated>false</IsTruncated>\r\n");
}
Set<String> commonPrefixes = new HashSet<>();
for (StorageMetadata metadata : set) {
if (metadata.getType() != StorageType.BLOB) {
commonPrefixes.add(metadata.getName());
continue;
}
writer.write(" <Contents>\r\n" +
" <Key>");
writer.write(metadata.getName());
writer.write("</Key>\r\n");
Date lastModified = metadata.getLastModified();
if (lastModified != null) {
writer.write(" <LastModified>");
writer.write(blobStore.getContext().utils().date()
.iso8601DateFormat(lastModified));
writer.write("</LastModified>\r\n");
}
String eTag = metadata.getETag();
if (eTag != null) {
writer.write(" <ETag>&quot;");
writer.write(metadata.getETag());
writer.write("&quot;</ETag>\r\n");
}
writer.write(
// TODO: StorageMetadata does not contain size
" <Size>0</Size>\r\n" +
" <StorageClass>STANDARD</StorageClass>\r\n" +
" <Owner>\r\n" +
" <ID>" + FAKE_OWNER_ID + "</ID>\r\n" +
" <DisplayName>" + FAKE_OWNER_DISPLAY_NAME + "</DisplayName>\r\n" +
" </Owner>\r\n" +
" </Contents>\r\n");
}
for (String commonPrefix : commonPrefixes) {
writer.write(" <CommonPrefixes>\r\n" +
" <Prefix>");
writer.write(commonPrefix);
writer.write("</Prefix>\r\n" +
" </CommonPrefixes>\r\n");
}
writer.write("</ListBucketResult>");
writer.flush();
} catch (IOException ioe) {
logger.error("Error writing to client: {}",
ioe.getMessage());
response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
}
}
private int handleBlobRemove(HttpServletResponse response,
String containerName, String blobName) {
try {
blobStore.removeBlob(containerName, blobName);
return HttpServletResponse.SC_OK;
} catch (RuntimeException re) {
logger.error("Error removing blob {} {}: {}", containerName,
blobName, re.getMessage());
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
}
private void handleBlobMetadata(HttpServletResponse response,
String containerName, String blobName) {
BlobMetadata metadata = blobStore.blobMetadata(containerName, blobName);
if (metadata == null) {
response.setStatus(HttpServletResponse.SC_NOT_FOUND);
return;
}
response.setStatus(HttpServletResponse.SC_OK);
addMetadataToResponse(response, metadata);
}
private int handleGetBlob(HttpServletRequest request,
HttpServletResponse response, String containerName,
String blobName) {
GetOptions options = new GetOptions();
String range = request.getHeader(HttpHeaders.RANGE);
if (range != null && range.startsWith("bytes=")
// ignore multiple ranges
&& range.indexOf(',') == -1) {
range = range.substring("bytes=".length());
String[] ranges = range.split("-", 2);
// only handle explicit first-last ranges; open-ended (bytes=N-) and
// suffix (bytes=-N) forms would otherwise throw NumberFormatException
if (ranges.length == 2 && !ranges[0].isEmpty() &&
!ranges[1].isEmpty()) {
options = options.range(Long.parseLong(ranges[0]),
Long.parseLong(ranges[1]));
}
}
Blob blob = blobStore.getBlob(containerName, blobName, options);
if (blob == null) {
return HttpServletResponse.SC_NOT_FOUND;
}
response.setStatus(HttpServletResponse.SC_OK);
addMetadataToResponse(response, blob.getMetadata());
try (InputStream is = blob.getPayload().openStream();
OutputStream os = response.getOutputStream()) {
ByteStreams.copy(is, os);
os.flush();
} catch (IOException ioe) {
logger.error("Error writing to client: {}", ioe.getMessage());
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
return HttpServletResponse.SC_OK;
}
private int handlePutBlob(HttpServletRequest request,
HttpServletResponse response, String containerName,
String blobName) throws IOException {
ImmutableMap.Builder<String, String> userMetadata =
ImmutableMap.builder();
Enumeration<String> enumeration = request.getHeaderNames();
while (enumeration.hasMoreElements()) {
String headerName = enumeration.nextElement();
if (headerName.toLowerCase().startsWith(USER_METADATA_PREFIX)) {
userMetadata.put(
headerName.substring(USER_METADATA_PREFIX.length()),
request.getHeader(headerName));
}
}
String contentMD5String = request.getHeader(HttpHeaders.CONTENT_MD5);
HashCode contentMD5 = contentMD5String == null ? null
: HashCode.fromBytes(BaseEncoding.base64().decode(
contentMD5String));
try (InputStream is = request.getInputStream()) {
BlobBuilder.PayloadBlobBuilder builder = blobStore
.blobBuilder(blobName)
.userMetadata(userMetadata.build())
.payload(is)
.contentDisposition(request.getHeader(
HttpHeaders.CONTENT_DISPOSITION))
.contentEncoding(request.getHeader(
HttpHeaders.CONTENT_ENCODING))
.contentLanguage(request.getHeader(
HttpHeaders.CONTENT_LANGUAGE))
.contentLength(request.getContentLength())
.contentType(request.getContentType());
long expires = request.getDateHeader(HttpHeaders.EXPIRES);
// getDateHeader returns -1 when the header is absent
if (expires != -1) {
builder = builder.expires(new Date(expires));
}
if (contentMD5 != null) {
builder = builder.contentMD5(contentMD5);
}
try {
String eTag = blobStore.putBlob(containerName, builder.build());
response.addHeader(HttpHeaders.ETAG, eTag);
} catch (HttpResponseException hre) {
// TODO: emit hre.getContent() ?
return hre.getResponse().getStatusCode();
}
return HttpServletResponse.SC_OK;
} catch (IOException ioe) {
logger.error("Error reading from client: {}", ioe.getMessage());
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
}
private static void addMetadataToResponse(HttpServletResponse response,
BlobMetadata metadata) {
ContentMetadata contentMetadata =
metadata.getContentMetadata();
response.addHeader(HttpHeaders.CONTENT_DISPOSITION,
contentMetadata.getContentDisposition());
response.addHeader(HttpHeaders.CONTENT_ENCODING,
contentMetadata.getContentEncoding());
response.addHeader(HttpHeaders.CONTENT_LANGUAGE,
contentMetadata.getContentLanguage());
response.addHeader(HttpHeaders.CONTENT_LENGTH,
contentMetadata.getContentLength().toString());
response.setContentType(contentMetadata.getContentType());
response.addHeader(HttpHeaders.CONTENT_MD5,
BaseEncoding.base64().encode(
contentMetadata.getContentMD5AsHashCode().asBytes()));
Date expires = contentMetadata.getExpires();
if (expires != null) {
response.addDateHeader(HttpHeaders.EXPIRES, expires.getTime());
}
response.addDateHeader(HttpHeaders.LAST_MODIFIED,
metadata.getLastModified().getTime());
for (Map.Entry<String, String> entry :
metadata.getUserMetadata().entrySet()) {
response.addHeader(USER_METADATA_PREFIX + entry.getKey(),
entry.getValue());
}
}
}
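For illustration only (not part of this commit): the x-amz-meta- handling in
handlePutBlob and addMetadataToResponse above means user metadata round-trips
through standard S3 headers. A hedged client-side sketch, assuming a jclouds
BlobStore named s3BlobStore pointed at the proxy as in S3ProxyTest below:
```
// user metadata is sent as x-amz-meta-key1: value1; handlePutBlob
// strips the prefix and stores {key1=value1} with the blob
ByteSource byteSource = ByteSource.wrap(new byte[1]);
Blob blob = s3BlobStore.blobBuilder("blob")
        .payload(byteSource)
        .contentLength(byteSource.size())
        .userMetadata(ImmutableMap.of("key1", "value1"))
        .build();
s3BlobStore.putBlob("container", blob);
// addMetadataToResponse adds the prefix back, so the metadata is
// visible to any S3 client on HEAD or GET
Map<String, String> userMetadata = s3BlobStore
        .blobMetadata("container", "blob").getUserMetadata();
```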

111
src/main/resources/checkstyle.xml 100644

@@ -0,0 +1,111 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE module PUBLIC "-//Puppy Crawl//DTD Check Configuration 1.3//EN" "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
<module name="Checker">
<property name="severity" value="warning"/>
<module name="FileLength">
<property name="max" value="4000"/>
</module>
<module name="FileTabCharacter"/>
<module name="JavadocPackage">
<property name="severity" value="ignore"/>
<metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
</module>
<module name="NewlineAtEndOfFile"/>
<module name="Translation"/>
<module name="TreeWalker">
<property name="cacheFile" value="target/cachefile"/>
<module name="ArrayTypeStyle"/>
<module name="AvoidNestedBlocks"/>
<module name="AvoidStarImport"/>
<module name="DesignForExtension"/>
<module name="EmptyBlock">
<property name="option" value="text"/>
</module>
<module name="EmptyForIteratorPad"/>
<module name="EmptyStatement"/>
<module name="EqualsHashCode"/>
<module name="FinalClass"/>
<module name="HiddenField">
<property name="ignoreConstructorParameter" value="true"/>
<property name="ignoreSetter" value="true"/>
</module>
<module name="HideUtilityClassConstructor"/>
<module name="IllegalImport"/>
<module name="IllegalInstantiation"/>
<module name="ImportOrder">
<property name="groups" value="java,javax,com,org"/>
<property name="separated" value="true"/>
<property name="option" value="top"/>
</module>
<module name="InnerAssignment"/>
<module name="InterfaceIsType"/>
<module name="JavadocStyle"/>
<module name="LeftCurly"/>
<module name="LineLength">
<!-- ignore long string literals -->
<property name="ignorePattern" value="^ *&quot;.*&quot; \+$"/>
</module>
<module name="LocalFinalVariableName"/>
<module name="LocalVariableName"/>
<module name="MagicNumber">
<property name="severity" value="ignore"/>
<metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
</module>
<module name="MemberName">
<property name="applyToPublic" value="false"/>
</module>
<module name="MethodLength">
<property name="max" value="500"/>
<property name="countEmpty" value="false"/>
</module>
<module name="MethodName"/>
<module name="MethodParamPad"/>
<module name="MissingSwitchDefault"/>
<module name="ModifierOrder"/>
<module name="MultipleVariableDeclarations"/>
<module name="MutableException"/>
<module name="NeedBraces"/>
<module name="NoWhitespaceAfter"/>
<module name="NoWhitespaceBefore"/>
<module name="OneStatementPerLine"/>
<module name="PackageName"/>
<module name="ParameterName"/>
<module name="ParameterNumber">
<property name="max" value="12"/>
</module>
<module name="ParenPad"/>
<module name="RedundantImport"/>
<module name="RedundantModifier"/>
<module name="RedundantThrows">
<property name="suppressLoadErrors" value="true"/>
</module>
<module name="RightCurly"/>
<module name="SimplifyBooleanExpression"/>
<module name="SimplifyBooleanReturn"/>
<module name="StaticVariableName"/>
<module name="TodoComment">
<property name="severity" value="ignore"/>
<metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
</module>
<module name="TypecastParenPad"/>
<module name="TypeName"/>
<module name="UnusedImports"/>
<module name="UpperEll"/>
<module name="VisibilityModifier">
<property name="protectedAllowed" value="true"/>
<property name="publicMemberPattern"
value="(^thrown$|^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$)"/>
</module>
<module name="WhitespaceAfter"/>
<module name="WhitespaceAround"/>
</module>
<module name="Header">
<property name="fileExtensions" value="java"/>
<property name="headerFile" value="${checkstyle.header.file}"/>
</module>
<module name="RegexpSingleline">
<property name="format" value="\s+$"/>
<property name="message" value="Line has trailing spaces."/>
</module>
</module>

16
src/main/resources/copyright_header.txt 100644

@@ -0,0 +1,16 @@
/*
* Copyright 2014 Andrew Gaul <andrew@gaul.org>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

16
src/main/resources/logback.xml 100644

@@ -0,0 +1,16 @@
<configuration>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%.-1p %d{MM-dd HH:mm:ss.SSS} %t %c{30}:%L %X{clientId}|%X{sessionId}:%X{messageId}:%X{fileId}] %m%n</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>${LOG_LEVEL:-info}</level>
</filter>
</appender>
<logger name="org.eclipse.jetty" level="${JETTY_LOG_LEVEL:-info}" />
<root level="${LOG_LEVEL:-info}">
<appender-ref ref="STDOUT" />
</root>
</configuration>

7
s3proxy.conf 100644

@@ -0,0 +1,7 @@
jclouds.provider=transient
jclouds.identity=identity
jclouds.credential=credential
#jclouds.endpoint=http://127.0.0.1:8081
jclouds.filesystem.basedir=/tmp/blobstore
s3proxy.endpoint=http://127.0.0.1:8080
#s3proxy.loglevel=INFO # TODO: not yet supported

246
src/test/java/org/gaul/s3proxy/S3ProxyTest.java 100644

@@ -0,0 +1,246 @@
/*
* Copyright 2014 Andrew Gaul <andrew@gaul.org>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gaul.s3proxy;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.InputStream;
import java.net.URI;
import java.util.Properties;
import com.google.common.collect.ImmutableSet;
import com.google.common.io.ByteSource;
import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.domain.BlobMetadata;
import org.jclouds.blobstore.domain.StorageMetadata;
import org.jclouds.blobstore.options.ListContainerOptions;
import org.jclouds.io.Payload;
import org.jclouds.io.payloads.ByteSourcePayload;
import org.jclouds.rest.HttpClient;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
public final class S3ProxyTest {
// TODO: configurable
private static final URI s3Endpoint = URI.create("http://127.0.0.1:8080");
private S3Proxy s3Proxy;
private BlobStoreContext context;
private BlobStoreContext s3Context;
private BlobStore s3BlobStore;
private static final String containerName = "container";
@Before
public void setUp() throws Exception {
Properties properties = new Properties();
context = ContextBuilder
.newBuilder("transient")
.credentials("identity", "credential")
.overrides(properties)
.build(BlobStoreContext.class);
BlobStore blobStore = context.getBlobStore();
blobStore.createContainerInLocation(null, containerName);
s3Context = ContextBuilder
.newBuilder("s3")
.credentials("identity", "credential")
.endpoint(s3Endpoint.toString())
.build(BlobStoreContext.class);
s3BlobStore = s3Context.getBlobStore();
s3Proxy = new S3Proxy(blobStore, s3Endpoint);
s3Proxy.start();
}
@After
public void tearDown() throws Exception {
if (s3Proxy != null) {
s3Proxy.stop();
}
if (s3Context != null) {
s3Context.close();
}
if (context != null) {
context.close();
}
}
// TODO: why does this hang for 30 seconds?
@Ignore
@Test
public void testHttpClient() throws Exception {
HttpClient httpClient = context.utils().http();
// TODO: how to interpret this?
URI uri = URI.create(s3Endpoint + "/container/blob");
ByteSource byteSource = ByteSource.wrap(new byte[1]);
Payload payload = new ByteSourcePayload(byteSource);
payload.getContentMetadata().setContentLength(byteSource.size());
httpClient.put(uri, payload);
try (InputStream actual = httpClient.get(uri);
InputStream expected = byteSource.openStream()) {
assertThat(actual).hasContentEqualTo(expected);
}
}
@Test
public void testJcloudsClient() throws Exception {
ImmutableSet.Builder<String> builder = ImmutableSet.builder();
for (StorageMetadata metadata : s3BlobStore.list()) {
builder.add(metadata.getName());
}
assertThat(builder.build()).containsOnly(containerName);
}
@Test
public void testContainerExists() throws Exception {
assertThat(s3BlobStore.containerExists("fakecontainer")).isFalse();
assertThat(s3BlobStore.containerExists(containerName)).isTrue();
}
@Test
public void testContainerCreate() throws Exception {
assertThat(s3BlobStore.createContainerInLocation(null,
"newcontainer")).isTrue();
assertThat(s3BlobStore.createContainerInLocation(null,
"newcontainer")).isFalse();
}
@Test
public void testContainerDelete() throws Exception {
assertThat(s3BlobStore.containerExists(containerName)).isTrue();
s3BlobStore.deleteContainerIfEmpty(containerName);
assertThat(s3BlobStore.containerExists(containerName)).isFalse();
}
@Test
public void testBlobPutGet() throws Exception {
String blobName = "blob";
ByteSource byteSource = ByteSource.wrap(new byte[42]);
Blob blob = s3BlobStore.blobBuilder(blobName)
.payload(byteSource)
.contentLength(byteSource.size())
.build();
s3BlobStore.putBlob(containerName, blob);
Blob blob2 = s3BlobStore.getBlob(containerName, blobName);
try (InputStream actual = blob2.getPayload().openStream();
InputStream expected = byteSource.openStream()) {
assertThat(actual).hasContentEqualTo(expected);
}
}
@Test
public void testBlobList() throws Exception {
assertThat(s3BlobStore.list(containerName)).isEmpty();
// TODO: hang with zero length blobs?
ByteSource byteSource = ByteSource.wrap(new byte[1]);
ImmutableSet.Builder<String> builder = ImmutableSet.builder();
Blob blob1 = s3BlobStore.blobBuilder("blob1")
.payload(byteSource)
.contentLength(byteSource.size())
.build();
s3BlobStore.putBlob(containerName, blob1);
for (StorageMetadata metadata : s3BlobStore.list(containerName)) {
builder.add(metadata.getName());
}
assertThat(builder.build()).containsOnly("blob1");
builder = ImmutableSet.builder();
Blob blob2 = s3BlobStore.blobBuilder("blob2")
.payload(byteSource)
.contentLength(byteSource.size())
.build();
s3BlobStore.putBlob(containerName, blob2);
for (StorageMetadata metadata : s3BlobStore.list(containerName)) {
builder.add(metadata.getName());
}
assertThat(builder.build()).containsOnly("blob1", "blob2");
}
@Test
public void testBlobListRecursive() throws Exception {
assertThat(s3BlobStore.list(containerName)).isEmpty();
ByteSource byteSource = ByteSource.wrap(new byte[1]);
Blob blob1 = s3BlobStore.blobBuilder("prefix/blob1")
.payload(byteSource)
.contentLength(byteSource.size())
.build();
s3BlobStore.putBlob(containerName, blob1);
Blob blob2 = s3BlobStore.blobBuilder("prefix/blob2")
.payload(byteSource)
.contentLength(byteSource.size())
.build();
s3BlobStore.putBlob(containerName, blob2);
ImmutableSet.Builder<String> builder = ImmutableSet.builder();
for (StorageMetadata metadata : s3BlobStore.list(containerName)) {
builder.add(metadata.getName());
}
assertThat(builder.build()).containsOnly("prefix");
builder = ImmutableSet.builder();
for (StorageMetadata metadata : s3BlobStore.list(containerName,
new ListContainerOptions().recursive())) {
builder.add(metadata.getName());
}
assertThat(builder.build()).containsOnly("prefix/blob1",
"prefix/blob2");
}
@Test
public void testBlobMetadata() throws Exception {
String blobName = "blob";
ByteSource byteSource = ByteSource.wrap(new byte[1]);
Blob blob1 = s3BlobStore.blobBuilder(blobName)
.payload(byteSource)
.contentLength(byteSource.size())
.build();
s3BlobStore.putBlob(containerName, blob1);
BlobMetadata metadata = s3BlobStore.blobMetadata(containerName,
blobName);
assertThat(metadata.getName()).isEqualTo(blobName);
assertThat(metadata.getContentMetadata().getContentLength())
.isEqualTo(byteSource.size());
assertThat(s3BlobStore.blobMetadata(containerName,
"fake-blob")).isNull();
}
@Test
public void testBlobRemove() throws Exception {
String blobName = "blob";
ByteSource byteSource = ByteSource.wrap(new byte[1]);
Blob blob = s3BlobStore.blobBuilder(blobName)
.payload(byteSource)
.contentLength(byteSource.size())
.build();
s3BlobStore.putBlob(containerName, blob);
assertThat(s3BlobStore.blobExists(containerName, blobName)).isTrue();
s3BlobStore.removeBlob(containerName, blobName);
assertThat(s3BlobStore.blobExists(containerName, blobName)).isFalse();
s3BlobStore.removeBlob(containerName, blobName);
}
}