Compare commits

...

5 Commits

Author SHA1 Message Date
Una Thompson 6cc1cc7fae
Replace built-in trash with HikariCP, enable perf options
Also don't keep around the entire config JSON in memory for no reason
2023-07-23 11:38:08 -07:00
Una Thompson fc79bb51f4
Fix blunder, Gradle 8 deprecation nonsense, compile for 11
2023-07-23 00:37:49 -07:00
Una Thompson 911c712be3
Fix slf4j and blob store driver name
2023-07-23 00:22:20 -07:00
Una Thompson e7b66883d1
Dependency updates, modernize Gradle setup, fix Misskey compat
Also a few things I forgot to commit and don't know what changed anymore.
Something to do with multipart uploads, and removing some unnecessary error throws.
2023-07-23 00:18:01 -07:00
Una Thompson fc72c27ce1
Use a log driver
2023-07-22 23:56:02 -07:00
9 changed files with 179 additions and 116 deletions

View File

@@ -1,12 +1,22 @@
plugins {
id 'com.github.johnrengelman.shadow' version '8.1.1'
id 'java'
id 'us.kirchmeier.capsule' version '1.0.2'
id 'com.github.ben-manes.versions' version '0.47.0'
}
repositories {
mavenCentral()
}
base {
archivesName = 'jortage-poolmgr'
version = '1.4.2'
}
compileJava {
options.release = 11
}
sourceSets {
main {
java {
@@ -15,38 +25,63 @@ sourceSets {
}
}
configurations.all {
resolutionStrategy {
force 'org.slf4j:slf4j-api:1.7.36'
}
}
dependencies {
implementation 'blue.endless:jankson:1.1.2'
implementation 'org.mariadb.jdbc:mariadb-java-client:2.4.4'
implementation 'com.squareup.okhttp3:okhttp:4.7.2'
implementation 'com.squareup.okhttp3:okhttp-brotli:4.7.2'
implementation 'blue.endless:jankson:1.2.3'
implementation 'com.squareup.okhttp3:okhttp:4.11.0'
implementation 'com.squareup.okhttp3:okhttp-brotli:4.11.0'
implementation 'org.apache.jclouds:jclouds-blobstore:2.2.1'
implementation 'org.apache.jclouds.provider:aws-s3:2.2.1'
implementation 'org.apache.jclouds.api:filesystem:2.2.1'
implementation 'org.apache.jclouds.driver:jclouds-slf4j:2.2.1'
implementation 'org.mariadb.jdbc:mariadb-java-client:3.1.4'
implementation 'com.zaxxer:HikariCP:5.0.1'
implementation 'org.eclipse.jetty:jetty-server:9.4.24.v20191120'
implementation 'org.apache.jclouds:jclouds-blobstore:2.5.0'
implementation 'org.apache.jclouds.provider:aws-s3:2.5.0'
implementation 'org.apache.jclouds.api:filesystem:2.5.0'
implementation 'org.apache.jclouds.driver:jclouds-slf4j:2.5.0'
implementation 'org.slf4j:slf4j-api:1.7.9'
implementation 'org.slf4j:slf4j-nop:1.7.9'
implementation 'org.eclipse.jetty:jetty-server:11.0.15'
implementation 'org.slf4j:slf4j-api:1.7.36'
implementation 'org.slf4j:slf4j-simple:1.7.36'
implementation 'com.google.code.findbugs:jsr305:3.0.2'
implementation 'com.google.code.findbugs:findbugs-annotations:3.0.1'
implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-xml:2.11.0'
implementation 'com.fasterxml.woodstox:woodstox-core:6.2.1'
implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-xml:2.15.2'
implementation 'com.fasterxml.woodstox:woodstox-core:6.5.1'
implementation 'commons-fileupload:commons-fileupload:1.4'
implementation 'commons-fileupload:commons-fileupload:1.5'
}
// I am *not* pulling in five different dependencies for a couple classes we don't use
file('s3proxy/src/main/java/org/gaul/s3proxy/junit/S3ProxyRule.java').delete();
file('s3proxy/src/main/java/org/gaul/s3proxy/Main.java').delete();
// I am *not* pulling in seven different dependencies for classes we don't use
['junit/S3ProxyRule.java', 'junit/S3ProxyExtension.java', 'Main.java', 'EncryptedBlobStore.java'].each {
file('s3proxy/src/main/java/org/gaul/s3proxy/'+it).delete()
}
project.configurations.implementation.setCanBeResolved(true)
jar {
destinationDirectory.set(file('build/tmp'))
}
task capsule(type: FatCapsule) {
embedConfiguration configurations.implementation
applicationClass 'com.jortage.poolmgr.Poolmgr'
}
shadowJar {
configurations = [project.configurations.compileClasspath]
manifest.attributes (
'Main-Class': 'com.jortage.poolmgr.Poolmgr'
)
archiveClassifier = ''
}
build.dependsOn shadowJar
tasks.named("dependencyUpdates").configure {
gradleReleaseChannel = 'current'
revision = 'release'
rejectVersionIf {
it.candidate.version.contains("alpha") || it.candidate.version.contains("beta")
|| (it.candidate.group == 'org.slf4j' && it.candidate.version.startsWith("2."))
}
}

View File

@@ -1,5 +1,5 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-4.6-bin.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-8.2.1-bin.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

@@ -1 +1 @@
Subproject commit e638111d05b5e7193d2ae8dd1d0e7a63110d878b
Subproject commit cf4db284a61043a0e78175baae7f889f637eee7c

View File

@@ -55,7 +55,6 @@ public class JortageBlobStore extends ForwardingBlobStore {
public JortageBlobStore(BlobStore blobStore, BlobStore dumpsStore, String bucket, String identity, DataSource dataSource) {
super(blobStore);
this.dumpsStore = dumpsStore;
dumpsStore.createContainerInLocation(null, identity);
this.bucket = bucket;
this.identity = identity;
this.dataSource = dataSource;
@@ -220,8 +219,9 @@ public class JortageBlobStore extends ForwardingBlobStore {
String hashString = hash.toString();
try (Payload payload = new FilePayload(f)) {
payload.getContentMetadata().setContentType(contentType);
if (delegate().blobExists(bucket, Poolmgr.hashToPath(hashString))) {
String etag = delegate().blobMetadata(bucket, Poolmgr.hashToPath(hashString)).getETag();
BlobMetadata meta = delegate().blobMetadata(bucket, Poolmgr.hashToPath(hashString));
if (meta != null) {
String etag = meta.getETag();
Queries.putMap(dataSource, identity, blobName, hash);
return etag;
}
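This hunk drops the separate blobExists() round trip: blobMetadata() already returns null for a missing blob, so the ETag lookup for a deduplicated upload now costs one request to the backing store instead of two. A minimal sketch of the pattern in isolation, with a hypothetical helper standing in for the real delegate() and Poolmgr.hashToPath():

import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.domain.BlobMetadata;

// Hypothetical helper illustrating the "one lookup instead of two" change above:
// blobMetadata() returns null when the blob is absent, so no blobExists() call is needed.
final class EtagLookup {
	private EtagLookup() {}

	/** Returns the ETag of an existing blob, or null if it is not present. */
	static String etagIfPresent(BlobStore store, String bucket, String path) {
		BlobMetadata meta = store.blobMetadata(bucket, path); // single request to the backing store
		return meta == null ? null : meta.getETag();
	}
}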
@@ -302,47 +302,53 @@ public class JortageBlobStore extends ForwardingBlobStore {
@Override
public String completeMultipartUpload(MultipartUpload mpu, List<MultipartPart> parts) {
Poolmgr.checkReadOnly();
if (isDump(mpu.blobName())) {
checkContainer(mpu.containerName());
return dumpsStore.completeMultipartUpload(mpu, parts);
}
mpu = mask(mpu);
// TODO this is a bit of a hack and isn't very efficient
String etag = delegate().completeMultipartUpload(mpu, parts);
try (InputStream stream = delegate().getBlob(mpu.containerName(), mpu.blobName()).getPayload().openStream()) {
CountingOutputStream counter = new CountingOutputStream(ByteStreams.nullOutputStream());
HashingOutputStream hos = new HashingOutputStream(Hashing.sha512(), counter);
ByteStreams.copy(stream, hos);
HashCode hash = hos.hash();
String hashStr = hash.toString();
String path = Poolmgr.hashToPath(hashStr);
// we're about to do a bunch of stuff at once
// sleep so we don't fall afoul of request rate limits
// (causes intermittent 429s on at least DigitalOcean)
Thread.sleep(250);
BlobMetadata meta = delegate().blobMetadata(mpu.containerName(), mpu.blobName());
if (!delegate().blobExists(bucket, path)) {
Thread.sleep(250);
etag = delegate().copyBlob(mpu.containerName(), mpu.blobName(), bucket, path, CopyOptions.builder().contentMetadata(meta.getContentMetadata()).build());
Thread.sleep(250);
delegate().setBlobAccess(bucket, path, BlobAccess.PUBLIC_READ);
Queries.putPendingBackup(dataSource, hash);
} else {
Thread.sleep(250);
etag = delegate().blobMetadata(bucket, path).getETag();
try {
Poolmgr.checkReadOnly();
if (isDump(mpu.blobName())) {
checkContainer(mpu.containerName());
return dumpsStore.completeMultipartUpload(mpu, parts);
}
Queries.putMap(dataSource, identity, Preconditions.checkNotNull(meta.getUserMetadata().get("jortage-originalname")), hash);
Queries.putFilesize(dataSource, hash, counter.getCount());
Queries.removeMultipart(dataSource, mpu.blobName());
Thread.sleep(250);
delegate().removeBlob(mpu.containerName(), mpu.blobName());
} catch (IOException e) {
throw new UncheckedIOException(e);
} catch (InterruptedException e) {
throw new RuntimeException(e);
mpu = mask(mpu);
// TODO this is a bit of a hack and isn't very efficient
String etag = delegate().completeMultipartUpload(mpu, parts);
try (InputStream stream = delegate().getBlob(mpu.containerName(), mpu.blobName()).getPayload().openStream()) {
CountingOutputStream counter = new CountingOutputStream(ByteStreams.nullOutputStream());
HashingOutputStream hos = new HashingOutputStream(Hashing.sha512(), counter);
ByteStreams.copy(stream, hos);
HashCode hash = hos.hash();
String hashStr = hash.toString();
String path = Poolmgr.hashToPath(hashStr);
// we're about to do a bunch of stuff at once
// sleep so we don't fall afoul of request rate limits
// (causes intermittent 429s on at least DigitalOcean)
Thread.sleep(100);
BlobMetadata meta = delegate().blobMetadata(mpu.containerName(), mpu.blobName());
BlobMetadata targetMeta = delegate().blobMetadata(bucket, path);
if (targetMeta == null) {
Thread.sleep(100);
etag = delegate().copyBlob(mpu.containerName(), mpu.blobName(), bucket, path, CopyOptions.builder().contentMetadata(meta.getContentMetadata()).build());
Thread.sleep(100);
delegate().setBlobAccess(bucket, path, BlobAccess.PUBLIC_READ);
Queries.putPendingBackup(dataSource, hash);
} else {
Thread.sleep(100);
etag = targetMeta.getETag();
}
Queries.putMap(dataSource, identity, Preconditions.checkNotNull(meta.getUserMetadata().get("jortage-originalname")), hash);
Queries.putFilesize(dataSource, hash, counter.getCount());
Queries.removeMultipart(dataSource, mpu.blobName());
Thread.sleep(100);
delegate().removeBlob(mpu.containerName(), mpu.blobName());
} catch (IOException e) {
throw new UncheckedIOException(e);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return etag;
} catch (Error | RuntimeException e) {
e.printStackTrace();
throw e;
}
return etag;
}
@Override
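In the rewritten completeMultipartUpload above, the assembled blob is read back once to derive its SHA-512 content address and byte count before being copied to the pooled path. A standalone sketch of just that hashing step, using the same Guava classes; the in-memory input stream is only a placeholder for the blob payload:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.common.hash.HashingOutputStream;
import com.google.common.io.ByteStreams;
import com.google.common.io.CountingOutputStream;

// Sketch: compute a stream's SHA-512 hash and length in a single pass, mirroring
// what completeMultipartUpload does with the re-read blob payload.
public class HashAndCount {
	public static void main(String[] args) throws IOException {
		try (InputStream in = new ByteArrayInputStream("example payload".getBytes(StandardCharsets.UTF_8))) {
			CountingOutputStream counter = new CountingOutputStream(ByteStreams.nullOutputStream());
			HashingOutputStream hasher = new HashingOutputStream(Hashing.sha512(), counter);
			ByteStreams.copy(in, hasher);
			HashCode hash = hasher.hash();
			System.out.println("sha512 = " + hash);              // becomes the pooled blob's path
			System.out.println("bytes  = " + counter.getCount()); // recorded via Queries.putFilesize
		}
	}
}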
@@ -437,18 +443,13 @@ public class JortageBlobStore extends ForwardingBlobStore {
private static final String NO_DIR_MSG = "Directories are an illusion";
private static final String NO_BULK_MSG = "Bulk operations are not implemented by Jortage for safety and speed";
private static final String NO_PRIVATE_MSG = "Jortage does not support private objects";
@Override
public void setContainerAccess(String container, ContainerAccess containerAccess) {
if (containerAccess != ContainerAccess.PUBLIC_READ)
throw new UnsupportedOperationException(NO_PRIVATE_MSG);
}
@Override
public void setBlobAccess(String container, String name, BlobAccess access) {
if (access != BlobAccess.PUBLIC_READ)
throw new UnsupportedOperationException(NO_PRIVATE_MSG);
}
@Override

View File

@@ -6,9 +6,9 @@ import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import jakarta.servlet.ServletException;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Request;
@@ -42,10 +42,13 @@ public class MastodonHackHandler extends HandlerWrapper {
response.sendError(202);
} catch (IOException e) {
}
}, 4000, TimeUnit.MILLISECONDS);
}, 2000, TimeUnit.MILLISECONDS);
}
try {
super.handle(target, baseRequest, request, response);
} finally {
if (shortCircuit != null) shortCircuit.cancel(false);
}
super.handle(target, baseRequest, request, response);
if (shortCircuit != null) shortCircuit.cancel(false);
}
}
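The change above shortens the deferred 202 short-circuit from 4000 ms to 2000 ms and, more importantly, moves its cancellation into a finally block so it still happens when the wrapped handler throws. A self-contained sketch of that schedule-then-always-cancel shape; the executor, delay, and task body are illustrative assumptions:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

// Sketch: schedule a fallback action, then guarantee it is cancelled no matter how the
// main work exits -- the same shape as the try/finally added in the hunk above.
public class ShortCircuitSketch {
	private static final ScheduledExecutorService SCHED = Executors.newSingleThreadScheduledExecutor();

	static void handle(Runnable mainWork) {
		ScheduledFuture<?> shortCircuit = SCHED.schedule(
				() -> System.out.println("fallback: sending early 202"),
				2000, TimeUnit.MILLISECONDS);
		try {
			mainWork.run(); // may throw; the fallback must still be cancelled
		} finally {
			shortCircuit.cancel(false);
		}
	}

	public static void main(String[] args) {
		handle(() -> System.out.println("handled normally"));
		SCHED.shutdown();
	}
}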

View File

@@ -2,9 +2,9 @@ package com.jortage.poolmgr;
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import jakarta.servlet.ServletException;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Request;

View File

@@ -8,7 +8,11 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Map;
import java.util.Properties;
import javax.sql.DataSource;
import sun.misc.Signal;
import org.eclipse.jetty.server.Server;
@@ -24,12 +28,13 @@ import org.jclouds.blobstore.domain.BlobAccess;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.filesystem.reference.FilesystemConstants;
import org.jclouds.logging.slf4j.config.SLF4JLoggingModule;
import org.mariadb.jdbc.MariaDbPoolDataSource;
import com.zaxxer.hikari.HikariDataSource;
import com.google.common.base.MoreObjects;
import com.google.common.base.Stopwatch;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Table;
import com.google.common.escape.Escaper;
@@ -37,20 +42,19 @@ import com.google.common.hash.HashCode;
import com.google.common.net.UrlEscapers;
import blue.endless.jankson.Jankson;
import blue.endless.jankson.JsonElement;
import blue.endless.jankson.JsonObject;
import blue.endless.jankson.JsonPrimitive;
public class Poolmgr {
private static final File configFile = new File("config.jkson");
public static JsonObject config;
public static long configFileLastLoaded;
public static BlobStore backingBlobStore;
public static BlobStore backingBackupBlobStore;
public static String bucket;
public static String backupBucket;
public static BlobStore backingBlobStore, backingBackupBlobStore, dumpsStore;
public static String bucket, backupBucket;
public static String publicHost;
public static MariaDbPoolDataSource dataSource;
public static DataSource dataSource;
public static Map<String, String> users;
public static volatile boolean readOnly = false;
private static boolean backingUp = false;
private static boolean rivetEnabled;
@@ -58,9 +62,15 @@ public class Poolmgr {
public static final Table<String, String, Object> provisionalMaps = HashBasedTable.create();
@SuppressWarnings("restriction")
public static void main(String[] args) throws Exception {
try {
Properties dumpsProps = new Properties();
dumpsProps.setProperty(FilesystemConstants.PROPERTY_BASEDIR, "dumps");
dumpsStore = ContextBuilder.newBuilder("filesystem")
.overrides(dumpsProps)
.build(BlobStoreContext.class)
.getBlobStore();
Stopwatch initSw = Stopwatch.createStarted();
reloadConfig();
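This hunk builds the local dumps blobstore once, at the very start of main() and before the first config load; together with the earlier JortageBlobStore hunk (which drops createContainerInLocation from the constructor) and a later hunk in reloadConfig, per-user dump containers are now created at config-load time instead of on every blobstore construction. A rough sketch of that setup, mirroring the jclouds calls shown above; the base directory and user names are placeholders:

import java.util.List;
import java.util.Properties;
import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.filesystem.reference.FilesystemConstants;

// Sketch: stand up the filesystem-backed "dumps" store once, then pre-create one
// container per configured user (placeholder names; the real list comes from config).
public class DumpsStoreSketch {
	public static void main(String[] args) {
		Properties props = new Properties();
		props.setProperty(FilesystemConstants.PROPERTY_BASEDIR, "dumps"); // local directory backing the store
		BlobStore dumpsStore = ContextBuilder.newBuilder("filesystem")
				.overrides(props)
				.build(BlobStoreContext.class)
				.getBlobStore();
		for (String user : List.of("alice", "bob")) { // would come from the config's "users" object
			dumpsStore.createContainerInLocation(null, user);
		}
	}
}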
@@ -71,6 +81,10 @@ public class Poolmgr {
.endpoint(URI.create("http://localhost:23278"))
.jettyMaxThreads(24)
.v4MaxNonChunkedRequestSize(128L*1024L*1024L)
// S3Proxy will throw if it sees an X-Amz header it doesn't recognize
// Misskey, starting in some recent version (as of July 2023) now sends an X-Amz-User-Agent header
// So without this, Misskey instances can't upload files. Cool thanks
.ignoreUnknownHeaders(true)
.build();
// excuse me, this is mine now
@@ -81,17 +95,11 @@ public class Poolmgr {
QueuedThreadPool pool = (QueuedThreadPool)s3ProxyServer.getThreadPool();
pool.setName("Jetty-Common");
Properties dumpsProps = new Properties();
dumpsProps.setProperty(FilesystemConstants.PROPERTY_BASEDIR, "dumps");
BlobStore dumpsStore = ContextBuilder.newBuilder("filesystem")
.overrides(dumpsProps)
.build(BlobStoreContext.class)
.getBlobStore();
s3Proxy.setBlobStoreLocator((identity, container, blob) -> {
reloadConfigIfChanged();
if (config.containsKey("users") && config.getObject("users").containsKey(identity)) {
return Maps.immutableEntry(((JsonPrimitive)config.getObject("users").get(identity)).asString(),
String secret = users.get(identity);
if (secret != null) {
return Maps.immutableEntry(secret,
new JortageBlobStore(backingBlobStore, dumpsStore, bucket, identity, dataSource));
} else {
throw new RuntimeException("Access denied");
@@ -193,7 +201,7 @@ public class Poolmgr {
}
public static void reloadConfigIfChanged() {
if (System.currentTimeMillis()-configFileLastLoaded > 500 && configFile.lastModified() > configFileLastLoaded) reloadConfig();
// if (System.currentTimeMillis()-configFileLastLoaded > 500 && configFile.lastModified() > configFileLastLoaded) reloadConfig();
}
private static String s(int i) {
@@ -201,7 +209,7 @@ public class Poolmgr {
}
private static void reloadConfig() {
boolean reloading = config != null;
boolean reloading = dataSource != null;
try {
String prelude = "\r"+(reloading ? "Reloading" : "Loading")+" config: ";
System.err.print(prelude+"Parsing...");
@@ -234,9 +242,20 @@ public class Poolmgr {
Escaper esc = UrlEscapers.urlFormParameterEscaper();
System.err.print(prelude+"Connecting to MariaDB... ");
System.err.flush();
MariaDbPoolDataSource dataSourceTmp =
new MariaDbPoolDataSource("jdbc:mariadb://"+pesc.escape(sqlHost)+":"+sqlPort+"/"+pesc.escape(sqlDb)
+"?user="+esc.escape(sqlUser)+"&password="+esc.escape(sqlPass)+"&autoReconnect=true");
HikariDataSource dataSourceTmp = new HikariDataSource();
dataSourceTmp.setJdbcUrl("jdbc:mariadb://"+pesc.escape(sqlHost)+":"+sqlPort+"/"+pesc.escape(sqlDb));
dataSourceTmp.setUsername(sqlUser);
dataSourceTmp.setPassword(sqlPass);
dataSourceTmp.addDataSourceProperty("cachePrepStmts", "true");
dataSourceTmp.addDataSourceProperty("prepStmtCacheSize", "100");
dataSourceTmp.addDataSourceProperty("prepStmtCacheSqlLimit", "3000");
dataSourceTmp.addDataSourceProperty("useServerPrepStmts", "true");
dataSourceTmp.addDataSourceProperty("useLocalSessionState", "true");
dataSourceTmp.addDataSourceProperty("rewriteBatchedStatements", "true");
dataSourceTmp.addDataSourceProperty("cacheResultSetMetadata", "true");
dataSourceTmp.addDataSourceProperty("cacheServerConfiguration", "true");
dataSourceTmp.addDataSourceProperty("elideSetAutoCommits", "true");
dataSourceTmp.addDataSourceProperty("maintainTimeStats", "false");
try (Connection c = dataSourceTmp.getConnection()) {
execOneshot(c, "CREATE TABLE IF NOT EXISTS `name_map` (\n" +
" `id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,\n" +
@@ -266,9 +285,14 @@
" PRIMARY KEY (`hash`)\n" +
") ROW_FORMAT=COMPRESSED;");
}
ImmutableMap.Builder<String, String> usersTmp = ImmutableMap.builder();
for (Map.Entry<String, JsonElement> en : configTmp.getObject("users").entrySet()) {
usersTmp.put(en.getKey(), ((JsonPrimitive)en.getValue()).asString());
dumpsStore.createContainerInLocation(null, en.getKey());
}
users = usersTmp.build();
System.err.println("\r"+(reloading ? "Reloading" : "Loading")+" config... done ");
MariaDbPoolDataSource oldDataSource = dataSource;
config = configTmp;
HikariDataSource oldDataSource = (HikariDataSource)dataSource;
configFileLastLoaded = configFileLastLoadedTmp;
bucket = bucketTmp;
publicHost = publicHostTmp;
@@ -297,7 +321,7 @@
}
private static BlobStore createBlobStore(JsonObject obj) {
return ContextBuilder.newBuilder("s3")
return ContextBuilder.newBuilder("aws-s3")
.credentials(((JsonPrimitive)obj.get("accessKeyId")).asString(), ((JsonPrimitive)obj.get("secretAccessKey")).asString())
.modules(ImmutableList.of(new SLF4JLoggingModule()))
.endpoint(((JsonPrimitive)obj.get("endpoint")).asString())

View File

@@ -2,9 +2,9 @@ package com.jortage.poolmgr;
import java.io.IOException;
import java.util.List;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import jakarta.servlet.ServletException;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.jclouds.blobstore.BlobStore;

View File

@@ -24,9 +24,9 @@ import kotlin.Pair;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import jakarta.servlet.ServletException;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.domain.BlobAccess;
@@ -98,7 +98,7 @@ public final class RivetHandler extends AbstractHandler {
private final LoadingCache<String, HashCode> urlCache = CacheBuilder.newBuilder()
.concurrencyLevel(1)
.expireAfterWrite(10, TimeUnit.MINUTES)
.removalListener((n) -> {
.<String, HashCode>removalListener((n) -> {
synchronized (retrieveMutex) {
results.remove(n.getKey());
}
@@ -171,7 +171,7 @@ public final class RivetHandler extends AbstractHandler {
}
private HashCode checkShortCircuit(String originalUrl, HttpUrl url, Temperature temp) {
String publicHost = Poolmgr.config.getObject("backend").get(String.class, "publicHost").replaceFirst("^https?://", "");
String publicHost = Poolmgr.publicHost.replaceFirst("^https?://", "");
String fullHost = url.host();
if (url.port() != (url.scheme().equals("https") ? 443 : 80)) {
fullHost = fullHost+":"+url.port();
@@ -457,7 +457,7 @@ public final class RivetHandler extends AbstractHandler {
return null;
}
if (!Poolmgr.config.containsKey("users") || !Poolmgr.config.getObject("users").containsKey(identity)) {
if (!Poolmgr.users.containsKey(identity)) {
jsonError(res, 401, "Rivet-Auth header invalid (Bad access ID)");
return null;
}
@@ -490,7 +490,7 @@ public final class RivetHandler extends AbstractHandler {
}
String payloadStr = new String(payload, Charsets.UTF_8);
String key = Poolmgr.config.getObject("users").get(String.class, identity);
String key = Poolmgr.users.get(identity);
assertSuccess(() -> mac.init(new SecretKeySpec(key.getBytes(Charsets.UTF_8), "RAW")));
String query;
if (req.getQueryString() == null) {
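The final RivetHandler hunks switch credential lookups from the raw config JSON to the new Poolmgr.users map; the per-user secret then keys a Mac used to verify the Rivet-Auth header. A small sketch of that keying step; "HmacSHA256" and the signed-payload layout are assumptions here, as the diff only shows the key lookup and the Mac/SecretKeySpec initialization:

import java.nio.charset.StandardCharsets;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

// Sketch: key an HMAC with the per-user secret, as in the final hunk above. The
// "HmacSHA256" algorithm and payload format are assumptions for illustration only.
public class RivetAuthSketch {
	static String sign(String secret, String payload) throws NoSuchAlgorithmException, InvalidKeyException {
		Mac mac = Mac.getInstance("HmacSHA256");
		mac.init(new SecretKeySpec(secret.getBytes(StandardCharsets.UTF_8), "RAW"));
		return Base64.getEncoder().encodeToString(mac.doFinal(payload.getBytes(StandardCharsets.UTF_8)));
	}

	public static void main(String[] args) throws Exception {
		System.out.println(sign("per-user-secret", "example payload"));
	}
}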