Replace MVStore with MySQL

trunk
Una Thompson 2019-09-22 02:12:06 -07:00
parent 096dab30a5
commit e924145ef1
5 changed files with 225 additions and 166 deletions

View file

@@ -9,7 +9,7 @@ repositories {
dependencies {
implementation 'blue.endless:jankson:1.1.2'
implementation 'com.h2database:h2-mvstore:1.4.199'
implementation 'org.mariadb.jdbc:mariadb-java-client:2.4.4'
implementation 'org.gaul:s3proxy:1.6.1'
}

View file

@@ -6,6 +6,13 @@
bucket: "mybucket"
publicHost: "https://sfo2.digitaloceanspaces.com/mybucket"
}
mysql: {
host: "localhost"
port: 3306
user: "jortage"
pass: "password"
database: "jortage"
}
users: {
// ACCESS_KEY_ID: "SECRET_ACCESS_KEY"
test: "test"

View file

@@ -6,9 +6,10 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import javax.sql.DataSource;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;
@@ -35,24 +36,22 @@ import org.jclouds.io.payloads.FilePayload;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.common.hash.HashingOutputStream;
import com.google.common.io.ByteStreams;
import com.google.common.io.CountingOutputStream;
public class JortageBlobStore extends ForwardingBlobStore {
private final String identity;
private final String bucket;
private final Map<String, String> paths;
private final DataSource dataSource;
public JortageBlobStore(BlobStore blobStore, String bucket, String identity, Map<String, String> paths) {
public JortageBlobStore(BlobStore blobStore, String bucket, String identity, DataSource dataSource) {
super(blobStore);
this.bucket = bucket;
this.identity = identity;
this.paths = paths;
}
private String buildKey(String name) {
return JortageProxy.buildKey(identity, name);
this.dataSource = dataSource;
}
private void checkContainer(String container) {
@@ -61,15 +60,9 @@ public class JortageBlobStore extends ForwardingBlobStore {
}
}
private String mapHash(String container, String name) {
private String getMapPath(String container, String name) {
checkContainer(container);
String hash = paths.get(buildKey(name));
if (hash == null) throw new IllegalArgumentException("Not found");
return hash;
}
private String map(String container, String name) {
return JortageProxy.hashToPath(mapHash(container, name));
return JortageProxy.hashToPath(Queries.getMap(dataSource, container, name).toString());
}
@Override
@@ -84,32 +77,32 @@ public class JortageBlobStore extends ForwardingBlobStore {
@Override
public Blob getBlob(String container, String name) {
return delegate().getBlob(bucket, map(container, name));
return delegate().getBlob(bucket, getMapPath(container, name));
}
@Override
public Blob getBlob(String container, String name, GetOptions getOptions) {
return delegate().getBlob(bucket, map(container, name), getOptions);
return delegate().getBlob(bucket, getMapPath(container, name), getOptions);
}
@Override
public void downloadBlob(String container, String name, File destination) {
delegate().downloadBlob(bucket, map(container, name), destination);
delegate().downloadBlob(bucket, getMapPath(container, name), destination);
}
@Override
public void downloadBlob(String container, String name, File destination, ExecutorService executor) {
delegate().downloadBlob(bucket, map(container, name), destination, executor);
delegate().downloadBlob(bucket, getMapPath(container, name), destination, executor);
}
@Override
public InputStream streamBlob(String container, String name) {
return delegate().streamBlob(bucket, map(container, name));
return delegate().streamBlob(bucket, getMapPath(container, name));
}
@Override
public InputStream streamBlob(String container, String name, ExecutorService executor) {
return delegate().streamBlob(bucket, map(container, name), executor);
return delegate().streamBlob(bucket, getMapPath(container, name), executor);
}
@Override
@@ -140,12 +133,12 @@ public class JortageBlobStore extends ForwardingBlobStore {
@Override
public boolean blobExists(String container, String name) {
return delegate().blobExists(bucket, map(container, name));
return delegate().blobExists(bucket, getMapPath(container, name));
}
@Override
public BlobMetadata blobMetadata(String container, String name) {
return delegate().blobMetadata(bucket, map(container, name));
return delegate().blobMetadata(bucket, getMapPath(container, name));
}
@Override
@@ -176,26 +169,28 @@ public class JortageBlobStore extends ForwardingBlobStore {
File f = File.createTempFile("jortage-proxy-", ".dat");
tempFile = f;
String contentType = blob.getPayload().getContentMetadata().getContentType();
String hash;
HashCode hash;
try (InputStream is = blob.getPayload().openStream();
FileOutputStream fos = new FileOutputStream(f)) {
HashingOutputStream hos = new HashingOutputStream(Hashing.sha512(), fos);
ByteStreams.copy(is, hos);
hash = hos.hash().toString();
hash = hos.hash();
}
String hashString = hash.toString();
try (Payload payload = new FilePayload(f)) {
payload.getContentMetadata().setContentType(contentType);
if (delegate().blobExists(bucket, JortageProxy.hashToPath(hash))) {
String etag = delegate().blobMetadata(bucket, JortageProxy.hashToPath(hash)).getETag();
paths.put(buildKey(blob.getMetadata().getName()), hash);
if (delegate().blobExists(bucket, JortageProxy.hashToPath(hashString))) {
String etag = delegate().blobMetadata(bucket, JortageProxy.hashToPath(hashString)).getETag();
Queries.putMap(dataSource, identity, blob.getMetadata().getName(), hash);
return etag;
}
Blob blob2 = blobBuilder(JortageProxy.hashToPath(hash))
Blob blob2 = blobBuilder(JortageProxy.hashToPath(hashString))
.payload(payload)
.userMetadata(blob.getMetadata().getUserMetadata())
.build();
String etag = delegate().putBlob(bucket, blob2, new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ).multipart());
paths.put(buildKey(blob.getMetadata().getName()), hash);
Queries.putMap(dataSource, identity, blob.getMetadata().getName(), hash);
Queries.putFilesize(dataSource, hash, f.length());
return etag;
}
} catch (IOException e) {
@@ -208,33 +203,33 @@ public class JortageBlobStore extends ForwardingBlobStore {
@Override
public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) {
// javadoc says options are ignored, so we ignore them too
checkContainer(fromContainer);
checkContainer(toContainer);
String hash = mapHash(fromContainer, fromName);
paths.put(buildKey(toName), hash);
return blobMetadata(bucket, JortageProxy.hashToPath(hash)).getETag();
HashCode hash = Queries.getMap(dataSource, identity, fromName);
Queries.putMap(dataSource, identity, toName, hash);
return blobMetadata(bucket, JortageProxy.hashToPath(hash.toString())).getETag();
}
@Override
public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) {
checkContainer(container);
MutableBlobMetadata mbm = new MutableBlobMetadataImpl(blobMetadata);
String name = "multitmp/"+identity+"-"+System.currentTimeMillis()+"-"+System.nanoTime();
mbm.setName(name);
String tempfile = "multitmp/"+identity+"-"+System.currentTimeMillis()+"-"+System.nanoTime();
mbm.setName(tempfile);
mbm.getUserMetadata().put("jortage-creator", identity);
mbm.getUserMetadata().put("jortage-originalname", blobMetadata.getName());
paths.put("multipart:"+buildKey(blobMetadata.getName()), name);
paths.put("multipart-rev:"+name, blobMetadata.getName());
Queries.putMultipart(dataSource, identity, blobMetadata.getName(), tempfile);
return delegate().initiateMultipartUpload(bucket, mbm, new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ));
}
private MultipartUpload mask(MultipartUpload mpu) {
checkContainer(mpu.containerName());
return MultipartUpload.create(bucket, Preconditions.checkNotNull(paths.get("multipart:"+buildKey(mpu.blobName()))), mpu.id(), mpu.blobMetadata(), new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ));
return MultipartUpload.create(bucket, Queries.getMultipart(dataSource, identity, mpu.blobName()), mpu.id(), mpu.blobMetadata(), new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ));
}
private MultipartUpload revmask(MultipartUpload mpu) {
checkContainer(mpu.containerName());
return MultipartUpload.create(bucket, Preconditions.checkNotNull(paths.get("multipart-rev:"+mpu.blobName())), mpu.id(), mpu.blobMetadata(), new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ));
return MultipartUpload.create(bucket, Queries.getMultipartRev(dataSource, mpu.blobName()), mpu.id(), mpu.blobMetadata(), new PutOptions().setBlobAccess(BlobAccess.PUBLIC_READ));
}
@Override
@@ -244,15 +239,16 @@ public class JortageBlobStore extends ForwardingBlobStore {
@Override
public String completeMultipartUpload(MultipartUpload mpu, List<MultipartPart> parts) {
String origKey = buildKey(mpu.blobName());
mpu = mask(mpu);
// TODO this is a bit of a hack and isn't very efficient
String etag = delegate().completeMultipartUpload(mpu, parts);
try (InputStream stream = delegate().getBlob(mpu.containerName(), mpu.blobName()).getPayload().openStream()) {
HashingOutputStream hos = new HashingOutputStream(Hashing.sha512(), ByteStreams.nullOutputStream());
CountingOutputStream counter = new CountingOutputStream(ByteStreams.nullOutputStream());
HashingOutputStream hos = new HashingOutputStream(Hashing.sha512(), counter);
ByteStreams.copy(stream, hos);
String hash = hos.hash().toString();
String path = JortageProxy.hashToPath(hash);
HashCode hash = hos.hash();
String hashStr = hash.toString();
String path = JortageProxy.hashToPath(hashStr);
// don't fall afoul of request rate limits
Thread.sleep(500);
BlobMetadata meta = delegate().blobMetadata(mpu.containerName(), mpu.blobName());
@@ -265,9 +261,9 @@ public class JortageBlobStore extends ForwardingBlobStore {
Thread.sleep(500);
etag = delegate().blobMetadata(bucket, path).getETag();
}
paths.put(buildKey(Preconditions.checkNotNull(meta.getUserMetadata().get("jortage-originalname"))), hash);
paths.remove("multipart:"+origKey);
paths.remove("multipart-rev:"+mpu.blobName());
Queries.putMap(dataSource, identity, Preconditions.checkNotNull(meta.getUserMetadata().get("jortage-originalname")), hash);
Queries.putFilesize(dataSource, hash, counter.getCount());
Queries.removeMultipart(dataSource, mpu.blobName());
Thread.sleep(500);
delegate().removeBlob(mpu.containerName(), mpu.blobName());
} catch (IOException e) {

View file

@@ -1,21 +1,14 @@
package com.jortage.proxy;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Map.Entry;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
@@ -28,20 +21,17 @@ import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.gaul.s3proxy.AuthenticationType;
import org.gaul.s3proxy.BlobStoreLocator;
import org.gaul.s3proxy.S3Proxy;
import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;
import org.h2.mvstore.type.StringDataType;
import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.logging.slf4j.config.SLF4JLoggingModule;
import org.mariadb.jdbc.MariaDbPoolDataSource;
import com.google.common.base.Charsets;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import com.google.common.escape.Escaper;
import com.google.common.net.UrlEscapers;
import blue.endless.jankson.Jankson;
import blue.endless.jankson.JsonObject;
@@ -57,62 +47,15 @@ public class JortageProxy {
private static BlobStore backingBlobStore;
private static String bucket;
private static String publicHost;
private static MariaDbPoolDataSource dataSource;
public static void main(String[] args) throws Exception {
reloadConfig();
File restore = new File("data.dat.gz");
File mv = new File("data.mv");
boolean doRestore = false;
if (!mv.exists() && restore.exists()) {
doRestore = true;
}
MVStore store = new MVStore.Builder()
.fileName("data.mv")
.cacheConcurrency(4)
.cacheSize(32)
.compress()
.open();
MVMap<String, String> paths = store.openMap("paths", new MVMap.Builder<String, String>()
.keyType(new StringDataType())
.valueType(new StringDataType()));
if (doRestore) {
int recovered = 0;
int total = 0;
boolean success = false;
try (DataInputStream dis = new DataInputStream(new GZIPInputStream(new FileInputStream(restore)))) {
int count = dis.readInt();
total = count;
for (int i = 0; i < count; i++) {
byte[] key = new byte[dis.readInt()];
dis.readFully(key);
byte[] val = new byte[dis.readInt()];
dis.readFully(val);
String keyStr = new String(key, Charsets.UTF_8);
String valStr = new String(val, Charsets.UTF_8);
paths.put(keyStr, valStr);
recovered++;
}
System.err.println("Recovery successful! Recovered "+recovered+" entries.");
success = true;
} catch (IOException e) {
e.printStackTrace();
if (recovered == 0) {
System.err.println("Recovery failed! No data could be restored!");
} else {
System.err.println("Recovery failed; only "+recovered+"/"+total+" entries could be restored!");
}
}
if (success) {
restore.delete();
}
}
S3Proxy s3Proxy = S3Proxy.builder()
.awsAuthentication(AuthenticationType.AWS_V2_OR_V4, "DUMMY", "DUMMY")
.endpoint(URI.create("http://localhost:23278"))
.jettyMaxThreads(4)
.jettyMaxThreads(24)
.v4MaxNonChunkedRequestSize(128L*1024L*1024L)
.build();
@@ -122,7 +65,7 @@ public class JortageProxy {
public Entry<String, BlobStore> locateBlobStore(String identity, String container, String blob) {
if (System.currentTimeMillis()-configFileLastLoaded > 500 && configFile.lastModified() > configFileLastLoaded) reloadConfig();
if (config.containsKey("users") && config.getObject("users").containsKey(identity)) {
return Maps.immutableEntry(((JsonPrimitive)config.getObject("users").get(identity)).asString(), new JortageBlobStore(backingBlobStore, bucket, identity, paths));
return Maps.immutableEntry(((JsonPrimitive)config.getObject("users").get(identity)).asString(), new JortageBlobStore(backingBlobStore, bucket, identity, dataSource));
} else {
throw new RuntimeException("Access denied");
}
@@ -132,7 +75,7 @@ public class JortageProxy {
s3Proxy.start();
System.err.println("S3 listening on localhost:23278");
QueuedThreadPool pool = new QueuedThreadPool(12);
QueuedThreadPool pool = new QueuedThreadPool(24);
pool.setName("Redir-Jetty");
Server redir = new Server(pool);
ServerConnector conn = new ServerConnector(redir);
@@ -156,62 +99,18 @@ public class JortageProxy {
} else {
String identity = split.get(0);
String name = split.get(1);
String key = buildKey(identity, name);
if (paths.containsKey(key)) {
response.setHeader("Location", publicHost+"/"+hashToPath(paths.get(key)));
try {
String hash = Queries.getMap(dataSource, identity, name).toString();
response.setHeader("Location", publicHost+"/"+hashToPath(hash));
response.setStatus(301);
return;
} else {
} catch (IllegalArgumentException e) {
response.sendError(404);
return;
}
}
}
});
redir.start();
System.err.println("Redirector listening on localhost:23279");
SimpleDateFormat dateFormat = new SimpleDateFormat("YYYY-MM-DD_HH-mm-ss");
int i = 0;
while (true) {
Thread.sleep(15000);
store.commit();
i++;
// every 10 minutes (roughly)
// FIXME this is causing OOMEs in production
if (false && i % 40 == 0) {
System.err.println("Creating backup...");
File backups = new File("backups");
if (!backups.exists()) {
backups.mkdir();
Files.write(
"These are gzipped dumps of the contents of `data.mv` in an ad-hoc format\n"
+ "that can be restored by jortage-proxy in the event of data loss. To cause\n"
+ "such a restore, delete `data.mv` and copy one of these files to `data.dat.gz`\n"
+ "in the jortage-proxy directory. Upon start, jortage-proxy will load the\n"
+ "data in the dump into the newly-created `data.mv` and then delete `data.dat.gz`.",
new File(backups, "README.txt"), Charsets.UTF_8);
}
File f = new File("backups/"+dateFormat.format(new Date())+".dat.gz");
try (DataOutputStream dos = new DataOutputStream(new GZIPOutputStream(new FileOutputStream(f)))) {
List<Map.Entry<String, String>> entries = Lists.newArrayList(paths.entrySet());
dos.writeInt(entries.size());
for (Map.Entry<String, String> en : entries) {
byte[] keyBys = en.getKey().getBytes(Charsets.UTF_8);
byte[] valBys = en.getValue().getBytes(Charsets.UTF_8);
dos.writeInt(keyBys.length);
dos.write(keyBys);
dos.writeInt(valBys.length);
dos.write(valBys);
}
System.err.println("Backup successful.");
} catch (IOException e) {
e.printStackTrace();
System.err.println("Failed to write backup!");
}
}
}
}
private static void reloadConfig() {
@@ -228,15 +127,54 @@ public class JortageProxy {
.overrides(props)
.build(BlobStoreContext.class)
.getBlobStore();
JsonObject sql = config.getObject("mysql");
String sqlHost = ((JsonPrimitive)sql.get("host")).asString();
int sqlPort = ((Number)((JsonPrimitive)sql.get("port")).getValue()).intValue();
String sqlDb = ((JsonPrimitive)sql.get("database")).asString();
String sqlUser = ((JsonPrimitive)sql.get("user")).asString();
String sqlPass = ((JsonPrimitive)sql.get("pass")).asString();
Escaper pesc = UrlEscapers.urlPathSegmentEscaper();
Escaper esc = UrlEscapers.urlFormParameterEscaper();
if (dataSource != null) {
dataSource.close();
}
dataSource = new MariaDbPoolDataSource("jdbc:mariadb://"+pesc.escape(sqlHost)+":"+sqlPort+"/"+pesc.escape(sqlDb)+"?user="+esc.escape(sqlUser)+"&password="+esc.escape(sqlPass)+"&autoReconnect=true");
try (Connection c = dataSource.getConnection()) {
execOneshot(c, "CREATE TABLE `name_map` (\n" +
" `id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,\n" +
" `identity` VARCHAR(255) NOT NULL,\n" +
" `name` VARCHAR(255) NOT NULL,\n" +
" `hash` BINARY(64) NOT NULL,\n" +
" PRIMARY KEY (`id`),\n" +
" UNIQUE INDEX `forward` (`identity`, `name`),\n" +
" INDEX `reverse` (`hash`)\n" +
") ROW_FORMAT=COMPRESSED;");
execOneshot(c, "CREATE TABLE `multipart_uploads` (\n" +
" `id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,\n" +
" `identity` VARCHAR(255) NOT NULL,\n" +
" `name` VARCHAR(255) NOT NULL,\n" +
" `tempfile` VARCHAR(255) NOT NULL,\n" +
" PRIMARY KEY (`id`),\n" +
" UNIQUE INDEX `forward` (`identity`, `name`),\n" +
" UNIQUE INDEX `reverse` (`tempfile`)\n" +
") ROW_FORMAT=COMPRESSED;");
execOneshot(c, "CREATE TABLE `filesizes` (\n" +
" `hash` BINARY(64) NOT NULL,\n" +
" `size` BIGINT UNSIGNED NOT NULL,\n" +
" PRIMARY KEY (`hash`)\n" +
") ROW_FORMAT=COMPRESSED;");
}
System.err.println("Config file reloaded.");
} catch (Exception e) {
e.printStackTrace();
System.err.println("Failed to reload config. The config is not changed.");
System.err.println("Failed to reload config. Behavior unchanged.");
}
}
public static String buildKey(String identity, String name) {
return identity+":"+name;
private static void execOneshot(Connection c, String sql) throws SQLException {
try (Statement s = c.createStatement()) {
s.execute(sql);
}
}
public static String hashToPath(String hash) {

View file

@@ -0,0 +1,118 @@
package com.jortage.proxy;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import javax.sql.DataSource;
import com.google.common.hash.HashCode;
public class Queries {
public static HashCode getMap(DataSource dataSource, String identity, String name) {
try (Connection c = dataSource.getConnection()) {
try (PreparedStatement ps = c.prepareStatement("SELECT `hash` FROM `name_map` WHERE `identity` = ? AND `name` = ?;")) {
ps.setString(1, identity);
ps.setString(2, name);
try (ResultSet rs = ps.executeQuery()) {
if (rs.first()) {
return HashCode.fromBytes(rs.getBytes("hash"));
} else {
throw new IllegalArgumentException("Not found");
}
}
}
} catch (SQLException e) {
throw new RuntimeException(e);
}
}
public static void putMap(DataSource dataSource, String identity, String name, HashCode hash) {
try (Connection c = dataSource.getConnection()) {
try (PreparedStatement ps = c.prepareStatement("INSERT INTO `name_map` (`identity`, `name`, `hash`) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE `hash` = ?;")) {
ps.setString(1, identity);
ps.setString(2, name);
ps.setBytes(3, hash.asBytes());
ps.setBytes(4, hash.asBytes());
ps.executeUpdate();
}
} catch (SQLException e) {
throw new RuntimeException(e);
}
}
public static void putFilesize(DataSource dataSource, HashCode hash, long size) {
try (Connection c = dataSource.getConnection()) {
try (PreparedStatement ps = c.prepareStatement("INSERT IGNORE INTO `filesizes` (`hash`, `size`) VALUES (?, ?);")) {
ps.setBytes(1, hash.asBytes());
ps.setLong(2, size);
ps.executeUpdate();
}
} catch (SQLException e) {
throw new RuntimeException(e);
}
}
public static void putMultipart(DataSource dataSource, String identity, String name, String tempfile) {
try (Connection c = dataSource.getConnection()) {
try (PreparedStatement ps = c.prepareStatement("INSERT INTO `multipart_uploads` (`identity`, `name`, `tempfile`) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE `tempfile` = ?;")) {
ps.setString(1, identity);
ps.setString(2, name);
ps.setString(3, tempfile);
ps.setString(4, tempfile);
ps.executeUpdate();
}
} catch (SQLException e) {
throw new RuntimeException(e);
}
}
public static String getMultipart(DataSource dataSource, String identity, String name) {
try (Connection c = dataSource.getConnection()) {
try (PreparedStatement ps = c.prepareStatement("SELECT `tempfile` FROM `multipart_uploads` WHERE `identity` = ? AND `name` = ?;")) {
ps.setString(1, identity);
ps.setString(2, name);
try (ResultSet rs = ps.executeQuery()) {
if (rs.first()) {
return rs.getString("tempfile");
} else {
throw new IllegalArgumentException("Not found");
}
}
}
} catch (SQLException e) {
throw new RuntimeException(e);
}
}
public static String getMultipartRev(DataSource dataSource, String tempfile) {
try (Connection c = dataSource.getConnection()) {
try (PreparedStatement ps = c.prepareStatement("SELECT `name` FROM `multipart_uploads` WHERE `tempfile` = ?;")) {
ps.setString(1, tempfile);
try (ResultSet rs = ps.executeQuery()) {
if (rs.first()) {
return rs.getString("name");
} else {
throw new IllegalArgumentException("Not found");
}
}
}
} catch (SQLException e) {
throw new RuntimeException(e);
}
}
public static void removeMultipart(DataSource dataSource, String tempfile) {
try (Connection c = dataSource.getConnection()) {
try (PreparedStatement ps = c.prepareStatement("DELETE FROM `multipart_uploads` WHERE `tempfile` = ?;")) {
ps.setString(1, tempfile);
ps.executeUpdate();
}
} catch (SQLException e) {
throw new RuntimeException(e);
}
}
}
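
As a quick sanity check of the new persistence layer, a standalone snippet along the following lines could exercise Queries against a local MariaDB. This is not part of the commit: the class name, credentials, and object name are made-up placeholders, and it assumes the three tables created in reloadConfig() already exist. It uses the same MariaDbPoolDataSource and URL shape that reloadConfig() builds above.

package com.jortage.proxy;

import java.nio.charset.StandardCharsets;

import org.mariadb.jdbc.MariaDbPoolDataSource;

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;

public class QueriesSmokeTest {
	public static void main(String[] args) throws Exception {
		// hypothetical: same pool class and URL shape as reloadConfig(); values are the example placeholders
		MariaDbPoolDataSource dataSource = new MariaDbPoolDataSource(
				"jdbc:mariadb://localhost:3306/jortage?user=jortage&password=password&autoReconnect=true");
		try {
			// record a name -> hash mapping, the way putBlob does after hashing an upload
			byte[] payload = "hello world".getBytes(StandardCharsets.UTF_8);
			HashCode hash = Hashing.sha512().hashBytes(payload);
			Queries.putMap(dataSource, "test", "example/object.png", hash);
			Queries.putFilesize(dataSource, hash, payload.length);

			// read the mapping back, the way getBlob and the redirector do, and print the backing path
			HashCode loaded = Queries.getMap(dataSource, "test", "example/object.png");
			System.out.println(JortageProxy.hashToPath(loaded.toString()));
		} finally {
			dataSource.close();
		}
	}
}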