Replace MVStore with MySQL

trunk
Una Thompson 2019-09-22 02:12:06 -07:00
rodzic 096dab30a5
commit e924145ef1
5 zmienionych plików z 225 dodań i 166 usunięć

Wyświetl plik

@ -9,7 +9,7 @@ repositories {
dependencies { dependencies {
implementation 'blue.endless:jankson:1.1.2' implementation 'blue.endless:jankson:1.1.2'
implementation 'com.h2database:h2-mvstore:1.4.199' implementation 'org.mariadb.jdbc:mariadb-java-client:2.4.4'
implementation 'org.gaul:s3proxy:1.6.1' implementation 'org.gaul:s3proxy:1.6.1'
} }

Wyświetl plik

@ -6,6 +6,13 @@
bucket: "mybucket" bucket: "mybucket"
publicHost: "https://sfo2.digitaloceanspaces.com/mybucket" publicHost: "https://sfo2.digitaloceanspaces.com/mybucket"
} }
mysql: {
host: "localhost"
port: 3306
user: "jortage"
pass: "password"
database: "jortage"
}
users: { users: {
// ACCESS_KEY_ID: "SECRET_ACCESS_KEY" // ACCESS_KEY_ID: "SECRET_ACCESS_KEY"
test: "test" test: "test"

Wyświetl plik

@ -6,9 +6,10 @@ import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.UncheckedIOException; import java.io.UncheckedIOException;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import javax.sql.DataSource;
import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.Blob;
@ -35,24 +36,22 @@ import org.jclouds.io.payloads.FilePayload;
import com.google.common.base.Objects; import com.google.common.base.Objects;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing; import com.google.common.hash.Hashing;
import com.google.common.hash.HashingOutputStream; import com.google.common.hash.HashingOutputStream;
import com.google.common.io.ByteStreams; import com.google.common.io.ByteStreams;
import com.google.common.io.CountingOutputStream;
public class JortageBlobStore extends ForwardingBlobStore { public class JortageBlobStore extends ForwardingBlobStore {
private final String identity; private final String identity;
private final String bucket; private final String bucket;
private final Map<String, String> paths; private final DataSource dataSource;
public JortageBlobStore(BlobStore blobStore, String bucket, String identity, Map<String, String> paths) { public JortageBlobStore(BlobStore blobStore, String bucket, String identity, DataSource dataSource) {
super(blobStore); super(blobStore);
this.bucket = bucket; this.bucket = bucket;
this.identity = identity; this.identity = identity;
this.paths = paths; this.dataSource = dataSource;
}
private String buildKey(String name) {
return JortageProxy.buildKey(identity, name);
} }
private void checkContainer(String container) { private void checkContainer(String container) {
@ -61,15 +60,9 @@ public class JortageBlobStore extends ForwardingBlobStore {
} }
} }
private String mapHash(String container, String name) { private String getMapPath(String container, String name) {
checkContainer(container); checkContainer(container);
String hash = paths.get(buildKey(name)); return JortageProxy.hashToPath(Queries.getMap(dataSource, container, name).toString());
if (hash == null) throw new IllegalArgumentException("Not found");
return hash;
}
private String map(String container, String name) {
return JortageProxy.hashToPath(mapHash(container, name));
} }
@Override @Override
@ -84,32 +77,32 @@ public class JortageBlobStore extends ForwardingBlobStore {
@Override @Override
public Blob getBlob(String container, String name) { public Blob getBlob(String container, String name) {
return delegate().getBlob(bucket, map(container, name)); return delegate().getBlob(bucket, getMapPath(container, name));
} }
@Override @Override
public Blob getBlob(String container, String name, GetOptions getOptions) { public Blob getBlob(String container, String name, GetOptions getOptions) {
return delegate().getBlob(bucket, map(container, name), getOptions); return delegate().getBlob(bucket, getMapPath(container, name), getOptions);
} }
@Override @Override
public void downloadBlob(String container, String name, File destination) { public void downloadBlob(String container, String name, File destination) {
delegate().downloadBlob(bucket, map(container, name), destination); delegate().downloadBlob(bucket, getMapPath(container, name), destination);
} }
@Override @Override
public void downloadBlob(String container, String name, File destination, ExecutorService executor) { public void downloadBlob(String container, String name, File destination, ExecutorService executor) {
delegate().downloadBlob(bucket, map(container, name), destination, executor); delegate().downloadBlob(bucket, getMapPath(container, name), destination, executor);
} }
@Override @Override
public InputStream streamBlob(String container, String name) { public InputStream streamBlob(String container, String name) {
return delegate().streamBlob(bucket, map(container, name)); return delegate().streamBlob(bucket, getMapPath(container, name));
} }
@Override @Override
public InputStream streamBlob(String container, String name, ExecutorService executor) { public InputStream streamBlob(String container, String name, ExecutorService executor) {
return delegate().streamBlob(bucket, map(container, name), executor); return delegate().streamBlob(bucket, getMapPath(container, name), executor);
} }
@Override @Override
@ -140,12 +133,12 @@ public class JortageBlobStore extends ForwardingBlobStore {
@Override @Override
public boolean blobExists(String container, String name) { public boolean blobExists(String container, String name) {
return delegate().blobExists(bucket, map(container, name)); return delegate().blobExists(bucket, getMapPath(container, name));
} }
@Override @Override
public BlobMetadata blobMetadata(String container, String name) { public BlobMetadata blobMetadata(String container, String name) {
return delegate().blobMetadata(bucket, map(container, name)); return delegate().blobMetadata(bucket, getMapPath(container, name));
} }
@Override @Override
@ -176,26 +169,28 @@ public class JortageBlobStore extends ForwardingBlobStore {
File f = File.createTempFile("jortage-proxy-", ".dat"); File f = File.createTempFile("jortage-proxy-", ".dat");
tempFile = f; tempFile = f;
String contentType = blob.getPayload().getContentMetadata().getContentType(); String contentType = blob.getPayload().getContentMetadata().getContentType();
String hash; HashCode hash;
try (InputStream is = blob.getPayload().openStream(); try (InputStream is = blob.getPayload().openStream();
FileOutputStream fos = new FileOutputStream(f)) { FileOutputStream fos = new FileOutputStream(f)) {
HashingOutputStream hos = new HashingOutputStream(Hashing.sha512(), fos); HashingOutputStream hos = new HashingOutputStream(Hashing.sha512(), fos);
ByteStreams.copy(is, hos); ByteStreams.copy(is, hos);
hash = hos.hash().toString(); hash = hos.hash();
} }
String hashString = hash.toString();
try (Payload payload = new FilePayload(f)) { try (Payload payload = new FilePayload(f)) {
payload.getContentMetadata().setContentType(contentType); payload.getContentMetadata().setContentType(contentType);
if (delegate().blobExists(bucket, JortageProxy.hashToPath(hash))) { if (delegate().blobExists(bucket, JortageProxy.hashToPath(hashString))) {
String etag = delegate().blobMetadata(bucket, JortageProxy.hashToPath(hash)).getETag(); String etag = delegate().blobMetadata(bucket, JortageProxy.hashToPath(hashString)).getETag();
paths.put(buildKey(blob.getMetadata().getName()), hash); Queries.putMap(dataSource, identity, blob.getMetadata().getName(), hash);
return etag; return etag;
} }
Blob blob2 = blobBuilder(JortageProxy.hashToPath(hash)) Blob blob2 = blobBuilder(JortageProxy.hashToPath(hashString))
.payload(payload) .payload(payload)
.userMetadata(blob.getMetadata().getUserMetadata()) .userMetadata(blob.getMetadata().getUserMetadata())
.build(); .build();
String etag = delegate().putBlob(bucket, blob2, new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ).multipart()); String etag = delegate().putBlob(bucket, blob2, new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ).multipart());
paths.put(buildKey(blob.getMetadata().getName()), hash); Queries.putMap(dataSource, identity, blob.getMetadata().getName(), hash);
Queries.putFilesize(dataSource, hash, f.length());
return etag; return etag;
} }
} catch (IOException e) { } catch (IOException e) {
@ -208,33 +203,33 @@ public class JortageBlobStore extends ForwardingBlobStore {
@Override @Override
public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) { public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) {
// javadoc says options are ignored, so we ignore them too // javadoc says options are ignored, so we ignore them too
checkContainer(fromContainer);
checkContainer(toContainer); checkContainer(toContainer);
String hash = mapHash(fromContainer, fromName); HashCode hash = Queries.getMap(dataSource, identity, fromName);
paths.put(buildKey(toName), hash); Queries.putMap(dataSource, identity, toName, hash);
return blobMetadata(bucket, JortageProxy.hashToPath(hash)).getETag(); return blobMetadata(bucket, JortageProxy.hashToPath(hash.toString())).getETag();
} }
@Override @Override
public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) { public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) {
checkContainer(container); checkContainer(container);
MutableBlobMetadata mbm = new MutableBlobMetadataImpl(blobMetadata); MutableBlobMetadata mbm = new MutableBlobMetadataImpl(blobMetadata);
String name = "multitmp/"+identity+"-"+System.currentTimeMillis()+"-"+System.nanoTime(); String tempfile = "multitmp/"+identity+"-"+System.currentTimeMillis()+"-"+System.nanoTime();
mbm.setName(name); mbm.setName(tempfile);
mbm.getUserMetadata().put("jortage-creator", identity); mbm.getUserMetadata().put("jortage-creator", identity);
mbm.getUserMetadata().put("jortage-originalname", blobMetadata.getName()); mbm.getUserMetadata().put("jortage-originalname", blobMetadata.getName());
paths.put("multipart:"+buildKey(blobMetadata.getName()), name); Queries.putMultipart(dataSource, identity, blobMetadata.getName(), tempfile);
paths.put("multipart-rev:"+name, blobMetadata.getName());
return delegate().initiateMultipartUpload(bucket, mbm, new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ)); return delegate().initiateMultipartUpload(bucket, mbm, new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ));
} }
private MultipartUpload mask(MultipartUpload mpu) { private MultipartUpload mask(MultipartUpload mpu) {
checkContainer(mpu.containerName()); checkContainer(mpu.containerName());
return MultipartUpload.create(bucket, Preconditions.checkNotNull(paths.get("multipart:"+buildKey(mpu.blobName()))), mpu.id(), mpu.blobMetadata(), new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ)); return MultipartUpload.create(bucket, Queries.getMultipart(dataSource, identity, mpu.blobName()), mpu.id(), mpu.blobMetadata(), new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ));
} }
private MultipartUpload revmask(MultipartUpload mpu) { private MultipartUpload revmask(MultipartUpload mpu) {
checkContainer(mpu.containerName()); checkContainer(mpu.containerName());
return MultipartUpload.create(bucket, Preconditions.checkNotNull(paths.get("multipart-rev:"+mpu.blobName())), mpu.id(), mpu.blobMetadata(), new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ)); return MultipartUpload.create(bucket, Queries.getMultipartRev(dataSource, mpu.blobName()), mpu.id(), mpu.blobMetadata(), new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ));
} }
@Override @Override
@ -244,15 +239,16 @@ public class JortageBlobStore extends ForwardingBlobStore {
@Override @Override
public String completeMultipartUpload(MultipartUpload mpu, List<MultipartPart> parts) { public String completeMultipartUpload(MultipartUpload mpu, List<MultipartPart> parts) {
String origKey = buildKey(mpu.blobName());
mpu = mask(mpu); mpu = mask(mpu);
// TODO this is a bit of a hack and isn't very efficient // TODO this is a bit of a hack and isn't very efficient
String etag = delegate().completeMultipartUpload(mpu, parts); String etag = delegate().completeMultipartUpload(mpu, parts);
try (InputStream stream = delegate().getBlob(mpu.containerName(), mpu.blobName()).getPayload().openStream()) { try (InputStream stream = delegate().getBlob(mpu.containerName(), mpu.blobName()).getPayload().openStream()) {
HashingOutputStream hos = new HashingOutputStream(Hashing.sha512(), ByteStreams.nullOutputStream()); CountingOutputStream counter = new CountingOutputStream(ByteStreams.nullOutputStream());
HashingOutputStream hos = new HashingOutputStream(Hashing.sha512(), counter);
ByteStreams.copy(stream, hos); ByteStreams.copy(stream, hos);
String hash = hos.hash().toString(); HashCode hash = hos.hash();
String path = JortageProxy.hashToPath(hash); String hashStr = hash.toString();
String path = JortageProxy.hashToPath(hashStr);
// don't fall afoul of request rate limits // don't fall afoul of request rate limits
Thread.sleep(500); Thread.sleep(500);
BlobMetadata meta = delegate().blobMetadata(mpu.containerName(), mpu.blobName()); BlobMetadata meta = delegate().blobMetadata(mpu.containerName(), mpu.blobName());
@ -265,9 +261,9 @@ public class JortageBlobStore extends ForwardingBlobStore {
Thread.sleep(500); Thread.sleep(500);
etag = delegate().blobMetadata(bucket, path).getETag(); etag = delegate().blobMetadata(bucket, path).getETag();
} }
paths.put(buildKey(Preconditions.checkNotNull(meta.getUserMetadata().get("jortage-originalname"))), hash); Queries.putMap(dataSource, identity, Preconditions.checkNotNull(meta.getUserMetadata().get("jortage-originalname")), hash);
paths.remove("multipart:"+origKey); Queries.putFilesize(dataSource, hash, counter.getCount());
paths.remove("multipart-rev:"+mpu.blobName()); Queries.removeMultipart(dataSource, mpu.blobName());
Thread.sleep(500); Thread.sleep(500);
delegate().removeBlob(mpu.containerName(), mpu.blobName()); delegate().removeBlob(mpu.containerName(), mpu.blobName());
} catch (IOException e) { } catch (IOException e) {

Wyświetl plik

@ -1,21 +1,14 @@
package com.jortage.proxy; package com.jortage.proxy;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File; import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException; import java.io.IOException;
import java.net.URI; import java.net.URI;
import java.text.SimpleDateFormat; import java.sql.Connection;
import java.util.Date; import java.sql.SQLException;
import java.sql.Statement;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Properties; import java.util.Properties;
import java.util.Map.Entry; import java.util.Map.Entry;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import javax.servlet.ServletException; import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpServletResponse;
@ -28,20 +21,17 @@ import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.gaul.s3proxy.AuthenticationType; import org.gaul.s3proxy.AuthenticationType;
import org.gaul.s3proxy.BlobStoreLocator; import org.gaul.s3proxy.BlobStoreLocator;
import org.gaul.s3proxy.S3Proxy; import org.gaul.s3proxy.S3Proxy;
import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;
import org.h2.mvstore.type.StringDataType;
import org.jclouds.ContextBuilder; import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule;
import org.mariadb.jdbc.MariaDbPoolDataSource;
import com.google.common.base.Charsets;
import com.google.common.base.Splitter; import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
import com.google.common.io.Files; import com.google.common.escape.Escaper;
import com.google.common.net.UrlEscapers;
import blue.endless.jankson.Jankson; import blue.endless.jankson.Jankson;
import blue.endless.jankson.JsonObject; import blue.endless.jankson.JsonObject;
@ -57,62 +47,15 @@ public class JortageProxy {
private static BlobStore backingBlobStore; private static BlobStore backingBlobStore;
private static String bucket; private static String bucket;
private static String publicHost; private static String publicHost;
private static MariaDbPoolDataSource dataSource;
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
reloadConfig(); reloadConfig();
File restore = new File("data.dat.gz");
File mv = new File("data.mv");
boolean doRestore = false;
if (!mv.exists() && restore.exists()) {
doRestore = true;
}
MVStore store = new MVStore.Builder()
.fileName("data.mv")
.cacheConcurrency(4)
.cacheSize(32)
.compress()
.open();
MVMap<String, String> paths = store.openMap("paths", new MVMap.Builder<String, String>()
.keyType(new StringDataType())
.valueType(new StringDataType()));
if (doRestore) {
int recovered = 0;
int total = 0;
boolean success = false;
try (DataInputStream dis = new DataInputStream(new GZIPInputStream(new FileInputStream(restore)))) {
int count = dis.readInt();
total = count;
for (int i = 0; i < count; i++) {
byte[] key = new byte[dis.readInt()];
dis.readFully(key);
byte[] val = new byte[dis.readInt()];
dis.readFully(val);
String keyStr = new String(key, Charsets.UTF_8);
String valStr = new String(val, Charsets.UTF_8);
paths.put(keyStr, valStr);
recovered++;
}
System.err.println("Recovery successful! Recovered "+recovered+" entries.");
success = true;
} catch (IOException e) {
e.printStackTrace();
if (recovered == 0) {
System.err.println("Recovery failed! No data could be restored!");
} else {
System.err.println("Recovery failed; only "+recovered+"/"+total+" entries could be restored!");
}
}
if (success) {
restore.delete();
}
}
S3Proxy s3Proxy = S3Proxy.builder() S3Proxy s3Proxy = S3Proxy.builder()
.awsAuthentication(AuthenticationType.AWS_V2_OR_V4, "DUMMY", "DUMMY") .awsAuthentication(AuthenticationType.AWS_V2_OR_V4, "DUMMY", "DUMMY")
.endpoint(URI.create("http://localhost:23278")) .endpoint(URI.create("http://localhost:23278"))
.jettyMaxThreads(4) .jettyMaxThreads(24)
.v4MaxNonChunkedRequestSize(128L*1024L*1024L) .v4MaxNonChunkedRequestSize(128L*1024L*1024L)
.build(); .build();
@ -122,7 +65,7 @@ public class JortageProxy {
public Entry<String, BlobStore> locateBlobStore(String identity, String container, String blob) { public Entry<String, BlobStore> locateBlobStore(String identity, String container, String blob) {
if (System.currentTimeMillis()-configFileLastLoaded > 500 && configFile.lastModified() > configFileLastLoaded) reloadConfig(); if (System.currentTimeMillis()-configFileLastLoaded > 500 && configFile.lastModified() > configFileLastLoaded) reloadConfig();
if (config.containsKey("users") && config.getObject("users").containsKey(identity)) { if (config.containsKey("users") && config.getObject("users").containsKey(identity)) {
return Maps.immutableEntry(((JsonPrimitive)config.getObject("users").get(identity)).asString(), new JortageBlobStore(backingBlobStore, bucket, identity, paths)); return Maps.immutableEntry(((JsonPrimitive)config.getObject("users").get(identity)).asString(), new JortageBlobStore(backingBlobStore, bucket, identity, dataSource));
} else { } else {
throw new RuntimeException("Access denied"); throw new RuntimeException("Access denied");
} }
@ -132,7 +75,7 @@ public class JortageProxy {
s3Proxy.start(); s3Proxy.start();
System.err.println("S3 listening on localhost:23278"); System.err.println("S3 listening on localhost:23278");
QueuedThreadPool pool = new QueuedThreadPool(12); QueuedThreadPool pool = new QueuedThreadPool(24);
pool.setName("Redir-Jetty"); pool.setName("Redir-Jetty");
Server redir = new Server(pool); Server redir = new Server(pool);
ServerConnector conn = new ServerConnector(redir); ServerConnector conn = new ServerConnector(redir);
@ -156,62 +99,18 @@ public class JortageProxy {
} else { } else {
String identity = split.get(0); String identity = split.get(0);
String name = split.get(1); String name = split.get(1);
String key = buildKey(identity, name); try {
if (paths.containsKey(key)) { String hash = Queries.getMap(dataSource, identity, name).toString();
response.setHeader("Location", publicHost+"/"+hashToPath(paths.get(key))); response.setHeader("Location", publicHost+"/"+hashToPath(hash));
response.setStatus(301); response.setStatus(301);
return; } catch (IllegalArgumentException e) {
} else {
response.sendError(404); response.sendError(404);
return;
} }
} }
} }
}); });
redir.start(); redir.start();
System.err.println("Redirector listening on localhost:23279"); System.err.println("Redirector listening on localhost:23279");
SimpleDateFormat dateFormat = new SimpleDateFormat("YYYY-MM-DD_HH-mm-ss");
int i = 0;
while (true) {
Thread.sleep(15000);
store.commit();
i++;
// every 10 minutes (roughly)
// FIXME this is causing OOMEs in production
if (false && i % 40 == 0) {
System.err.println("Creating backup...");
File backups = new File("backups");
if (!backups.exists()) {
backups.mkdir();
Files.write(
"These are gzipped dumps of the contents of `data.mv` in an ad-hoc format\n"
+ "that can be restored by jortage-proxy in the event of data loss. To cause\n"
+ "such a restore, delete `data.mv` and copy one of these files to `data.dat.gz`\n"
+ "in the jortage-proxy directory. Upon start, jortage-proxy will load the\n"
+ "data in the dump into the newly-created `data.mv` and then delete `data.dat.gz`.",
new File(backups, "README.txt"), Charsets.UTF_8);
}
File f = new File("backups/"+dateFormat.format(new Date())+".dat.gz");
try (DataOutputStream dos = new DataOutputStream(new GZIPOutputStream(new FileOutputStream(f)))) {
List<Map.Entry<String, String>> entries = Lists.newArrayList(paths.entrySet());
dos.writeInt(entries.size());
for (Map.Entry<String, String> en : entries) {
byte[] keyBys = en.getKey().getBytes(Charsets.UTF_8);
byte[] valBys = en.getValue().getBytes(Charsets.UTF_8);
dos.writeInt(keyBys.length);
dos.write(keyBys);
dos.writeInt(valBys.length);
dos.write(valBys);
}
System.err.println("Backup successful.");
} catch (IOException e) {
e.printStackTrace();
System.err.println("Failed to write backup!");
}
}
}
} }
private static void reloadConfig() { private static void reloadConfig() {
@ -228,15 +127,54 @@ public class JortageProxy {
.overrides(props) .overrides(props)
.build(BlobStoreContext.class) .build(BlobStoreContext.class)
.getBlobStore(); .getBlobStore();
JsonObject sql = config.getObject("mysql");
String sqlHost = ((JsonPrimitive)sql.get("host")).asString();
int sqlPort = ((Number)((JsonPrimitive)sql.get("port")).getValue()).intValue();
String sqlDb = ((JsonPrimitive)sql.get("database")).asString();
String sqlUser = ((JsonPrimitive)sql.get("user")).asString();
String sqlPass = ((JsonPrimitive)sql.get("pass")).asString();
Escaper pesc = UrlEscapers.urlPathSegmentEscaper();
Escaper esc = UrlEscapers.urlFormParameterEscaper();
if (dataSource != null) {
dataSource.close();
}
dataSource = new MariaDbPoolDataSource("jdbc:mariadb://"+pesc.escape(sqlHost)+":"+sqlPort+"/"+pesc.escape(sqlDb)+"?user="+esc.escape(sqlUser)+"&password="+esc.escape(sqlPass)+"&autoReconnect=true");
try (Connection c = dataSource.getConnection()) {
execOneshot(c, "CREATE TABLE `name_map` (\n" +
" `id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,\n" +
" `identity` VARCHAR(255) NOT NULL,\n" +
" `name` VARCHAR(255) NOT NULL,\n" +
" `hash` BINARY(64) NOT NULL,\n" +
" PRIMARY KEY (`id`),\n" +
" UNIQUE INDEX `forward` (`identity`, `name`),\n" +
" INDEX `reverse` (`hash`)\n" +
") ROW_FORMAT=COMPRESSED;");
execOneshot(c, "CREATE TABLE `multipart_uploads` (\n" +
" `id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,\n" +
" `identity` VARCHAR(255) NOT NULL,\n" +
" `name` VARCHAR(255) NOT NULL,\n" +
" `tempfile` VARCHAR(255) NOT NULL,\n" +
" PRIMARY KEY (`id`),\n" +
" UNIQUE INDEX `forward` (`identity`, `name`),\n" +
" UNIQUE INDEX `reverse` (`tempfile`)\n" +
") ROW_FORMAT=COMPRESSED;");
execOneshot(c, "CREATE TABLE `filesizes` (\n" +
" `hash` BINARY(64) NOT NULL,\n" +
" `size` BIGINT UNSIGNED NOT NULL,\n" +
" PRIMARY KEY (`hash`)\n" +
") ROW_FORMAT=COMPRESSED;");
}
System.err.println("Config file reloaded."); System.err.println("Config file reloaded.");
} catch (Exception e) { } catch (Exception e) {
e.printStackTrace(); e.printStackTrace();
System.err.println("Failed to reload config. The config is not changed."); System.err.println("Failed to reload config. Behavior unchanged.");
} }
} }
public static String buildKey(String identity, String name) { private static void execOneshot(Connection c, String sql) throws SQLException {
return identity+":"+name; try (Statement s = c.createStatement()) {
s.execute(sql);
}
} }
public static String hashToPath(String hash) { public static String hashToPath(String hash) {

Wyświetl plik

@ -0,0 +1,118 @@
package com.jortage.proxy;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import javax.sql.DataSource;
import com.google.common.hash.HashCode;
/**
 * Stateless JDBC helpers for the proxy's metadata tables: {@code name_map}
 * (per-user blob name → SHA-512 content hash), {@code multipart_uploads}
 * (in-flight multipart temp-file bookkeeping), and {@code filesizes}
 * (content hash → byte size).
 *
 * <p>Every method borrows a connection from the supplied pool, executes a
 * single prepared statement, and releases the connection before returning.
 * {@link SQLException}s are wrapped in {@link RuntimeException} so callers
 * inside jclouds/s3proxy callbacks need no checked-exception handling.
 */
public class Queries {

	/**
	 * Looks up the content hash that {@code identity} stored under {@code name}.
	 *
	 * @return the SHA-512 hash of the blob's contents
	 * @throws IllegalArgumentException if no such mapping exists
	 * @throws RuntimeException wrapping any {@link SQLException}
	 */
	public static HashCode getMap(DataSource dataSource, String identity, String name) {
		try (Connection c = dataSource.getConnection();
				PreparedStatement ps = c.prepareStatement("SELECT `hash` FROM `name_map` WHERE `identity` = ? AND `name` = ?;")) {
			ps.setString(1, identity);
			ps.setString(2, name);
			try (ResultSet rs = ps.executeQuery()) {
				// rs.next(), not rs.first(): first() is only guaranteed on scrollable
				// result sets, and prepareStatement(String) yields TYPE_FORWARD_ONLY.
				if (rs.next()) {
					return HashCode.fromBytes(rs.getBytes("hash"));
				}
				throw new IllegalArgumentException("Not found");
			}
		} catch (SQLException e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Creates or replaces the mapping from ({@code identity}, {@code name})
	 * to {@code hash}. Upsert via {@code ON DUPLICATE KEY UPDATE} against the
	 * unique ({@code identity}, {@code name}) index.
	 *
	 * @throws RuntimeException wrapping any {@link SQLException}
	 */
	public static void putMap(DataSource dataSource, String identity, String name, HashCode hash) {
		try (Connection c = dataSource.getConnection();
				PreparedStatement ps = c.prepareStatement("INSERT INTO `name_map` (`identity`, `name`, `hash`) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE `hash` = ?;")) {
			ps.setString(1, identity);
			ps.setString(2, name);
			ps.setBytes(3, hash.asBytes());
			ps.setBytes(4, hash.asBytes());
			ps.executeUpdate();
		} catch (SQLException e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Records the byte size of the content addressed by {@code hash}.
	 * {@code INSERT IGNORE}: an already-known hash keeps its existing size,
	 * since identical content always has an identical length.
	 *
	 * @throws RuntimeException wrapping any {@link SQLException}
	 */
	public static void putFilesize(DataSource dataSource, HashCode hash, long size) {
		try (Connection c = dataSource.getConnection();
				PreparedStatement ps = c.prepareStatement("INSERT IGNORE INTO `filesizes` (`hash`, `size`) VALUES (?, ?);")) {
			ps.setBytes(1, hash.asBytes());
			ps.setLong(2, size);
			ps.executeUpdate();
		} catch (SQLException e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Creates or replaces the record of an in-flight multipart upload,
	 * mapping ({@code identity}, {@code name}) to its backing {@code tempfile}.
	 *
	 * @throws RuntimeException wrapping any {@link SQLException}
	 */
	public static void putMultipart(DataSource dataSource, String identity, String name, String tempfile) {
		try (Connection c = dataSource.getConnection();
				PreparedStatement ps = c.prepareStatement("INSERT INTO `multipart_uploads` (`identity`, `name`, `tempfile`) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE `tempfile` = ?;")) {
			ps.setString(1, identity);
			ps.setString(2, name);
			ps.setString(3, tempfile);
			ps.setString(4, tempfile);
			ps.executeUpdate();
		} catch (SQLException e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Resolves the temp-file name of the multipart upload that
	 * {@code identity} started for {@code name}.
	 *
	 * @throws IllegalArgumentException if no such upload is recorded
	 * @throws RuntimeException wrapping any {@link SQLException}
	 */
	public static String getMultipart(DataSource dataSource, String identity, String name) {
		try (Connection c = dataSource.getConnection();
				PreparedStatement ps = c.prepareStatement("SELECT `tempfile` FROM `multipart_uploads` WHERE `identity` = ? AND `name` = ?;")) {
			ps.setString(1, identity);
			ps.setString(2, name);
			try (ResultSet rs = ps.executeQuery()) {
				// See getMap for why next() is used instead of first().
				if (rs.next()) {
					return rs.getString("tempfile");
				}
				throw new IllegalArgumentException("Not found");
			}
		} catch (SQLException e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Reverse lookup: resolves the original blob name from a multipart
	 * {@code tempfile} (unique per the {@code reverse} index).
	 *
	 * @throws IllegalArgumentException if no such upload is recorded
	 * @throws RuntimeException wrapping any {@link SQLException}
	 */
	public static String getMultipartRev(DataSource dataSource, String tempfile) {
		try (Connection c = dataSource.getConnection();
				PreparedStatement ps = c.prepareStatement("SELECT `name` FROM `multipart_uploads` WHERE `tempfile` = ?;")) {
			ps.setString(1, tempfile);
			try (ResultSet rs = ps.executeQuery()) {
				// See getMap for why next() is used instead of first().
				if (rs.next()) {
					return rs.getString("name");
				}
				throw new IllegalArgumentException("Not found");
			}
		} catch (SQLException e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Deletes the multipart-upload record for {@code tempfile}, if any.
	 * Deleting a nonexistent record is a no-op, not an error.
	 *
	 * @throws RuntimeException wrapping any {@link SQLException}
	 */
	public static void removeMultipart(DataSource dataSource, String tempfile) {
		try (Connection c = dataSource.getConnection();
				PreparedStatement ps = c.prepareStatement("DELETE FROM `multipart_uploads` WHERE `tempfile` = ?;")) {
			ps.setString(1, tempfile);
			ps.executeUpdate();
		} catch (SQLException e) {
			throw new RuntimeException(e);
		}
	}
}