planetiler/planetiler-core/src/main/java/com/onthegomap/planetiler/archive/TileArchiveWriter.java

package com.onthegomap.planetiler.archive;

import static com.onthegomap.planetiler.util.Gzip.gzip;
import static com.onthegomap.planetiler.worker.Worker.joinFutures;

import com.onthegomap.planetiler.VectorTile;
import com.onthegomap.planetiler.collection.FeatureGroup;
import com.onthegomap.planetiler.config.PlanetilerConfig;
import com.onthegomap.planetiler.geo.TileCoord;
import com.onthegomap.planetiler.stats.Counter;
import com.onthegomap.planetiler.stats.ProcessInfo;
import com.onthegomap.planetiler.stats.ProgressLoggers;
import com.onthegomap.planetiler.stats.Stats;
import com.onthegomap.planetiler.stats.Timer;
import com.onthegomap.planetiler.util.DiskBacked;
import com.onthegomap.planetiler.util.Format;
import com.onthegomap.planetiler.util.Hashing;
import com.onthegomap.planetiler.util.LayerStats;
import com.onthegomap.planetiler.worker.WorkQueue;
import com.onthegomap.planetiler.worker.Worker;
import com.onthegomap.planetiler.worker.WorkerPipeline;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.OptionalLong;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.LongAccumulator;
import java.util.function.Consumer;
import java.util.function.LongSupplier;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Final stage of the map generation process that encodes vector tiles using {@link VectorTile} and writes them to a
 * {@link WriteableTileArchive}.
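 * <p>
 * A simplified usage sketch (the variable names are illustrative, not defined by this class; the arguments come from
 * earlier stages of the pipeline):
 *
 * <pre>{@code
 * // features: FeatureGroup, archive: WriteableTileArchive, fileSize: DiskBacked,
 * // metadata: TileArchiveMetadata, config: PlanetilerConfig, stats: Stats
 * TileArchiveWriter.writeOutput(features, archive, fileSize, metadata, config, stats);
 * }</pre>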
 */
public class TileArchiveWriter {

  private static final Logger LOGGER = LoggerFactory.getLogger(TileArchiveWriter.class);
  private static final long MAX_FEATURES_PER_BATCH = 10_000;
  private static final long MAX_TILES_PER_BATCH = 1_000;
  private final Counter.Readable featuresProcessed;
  private final Counter memoizedTiles;
  private final WriteableTileArchive archive;
  private final PlanetilerConfig config;
  private final Stats stats;
  private final LayerStats layerStats;
  private final Counter.Readable[] tilesByZoom;
  private final Counter.Readable[] totalTileSizesByZoom;
  private final LongAccumulator[] maxTileSizesByZoom;
  private final Iterable<FeatureGroup.TileFeatures> inputTiles;
  private final AtomicReference<TileCoord> lastTileWritten = new AtomicReference<>();
  private final TileArchiveMetadata tileArchiveMetadata;

  private TileArchiveWriter(Iterable<FeatureGroup.TileFeatures> inputTiles, WriteableTileArchive archive,
    PlanetilerConfig config,
    TileArchiveMetadata tileArchiveMetadata, Stats stats, LayerStats layerStats) {
    this.inputTiles = inputTiles;
    this.archive = archive;
    this.config = config;
    this.tileArchiveMetadata = tileArchiveMetadata;
    this.stats = stats;
    this.layerStats = layerStats;
    tilesByZoom = IntStream.rangeClosed(0, config.maxzoom())
      .mapToObj(i -> Counter.newSingleThreadCounter())
      .toArray(Counter.Readable[]::new);
    totalTileSizesByZoom = IntStream.rangeClosed(0, config.maxzoom())
      .mapToObj(i -> Counter.newMultiThreadCounter())
      .toArray(Counter.Readable[]::new);
    maxTileSizesByZoom = IntStream.rangeClosed(0, config.maxzoom())
      .mapToObj(i -> new LongAccumulator(Long::max, 0))
      .toArray(LongAccumulator[]::new);
    memoizedTiles = stats.longCounter("archive_memoized_tiles");
    featuresProcessed = stats.longCounter("archive_features_processed");
    Map<String, LongSupplier> countsByZoom = new LinkedHashMap<>();
    for (int zoom = config.minzoom(); zoom <= config.maxzoom(); zoom++) {
      countsByZoom.put(Integer.toString(zoom), tilesByZoom[zoom]);
    }
    stats.counter("archive_tiles_written", "zoom", () -> countsByZoom);
  }

  /** Reads all {@code features}, encodes them in parallel, and writes to {@code output}. */
  public static void writeOutput(FeatureGroup features, WriteableTileArchive output, DiskBacked fileSize,
    TileArchiveMetadata tileArchiveMetadata, PlanetilerConfig config, Stats stats) {
    var timer = stats.startStage("archive");
    int readThreads = config.featureReadThreads();
    int threads = config.threads();
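    // with 10+ threads, reserve readThreads of them for reading and use the rest for encoding;
    // with fewer threads, let encoding share all of them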
    int processThreads = threads < 10 ? threads : threads - readThreads;
    // when using more than 1 read thread: (N read threads) -> (1 merge thread) -> ...
    // when using 1 read thread we just have: (1 read & merge thread) -> ...
    Worker readWorker = null;
    Iterable<FeatureGroup.TileFeatures> inputTiles;
    String secondStageName;
    if (readThreads == 1) {
      secondStageName = "read";
      inputTiles = features;
    } else {
      secondStageName = "merge";
      var reader = features.parallelIterator(readThreads);
      inputTiles = reader.result();
      readWorker = reader.readWorker();
    }

    TileArchiveWriter writer = new TileArchiveWriter(inputTiles, output, config, tileArchiveMetadata, stats,
      features.layerStats());

    var pipeline = WorkerPipeline.start("archive", stats);

    // a larger tile queue size helps keep cores busy, but needs a lot of RAM
    // 5k works fine with 100GB of RAM, so adjust the queue size down from there
    // but no less than 100
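    // for example, with a 50GB max heap this works out to max(100, 5_000 * 0.5) = 2_500 batches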
    int queueSize = Math.max(
      100,
      (int) (5_000d * ProcessInfo.getMaxMemoryBytes() / 100_000_000_000d)
    );

    WorkerPipeline<TileBatch> encodeBranch, writeBranch = null;
    /*
     * To emit tiles in order, fork the input queue and send features to both the encoder and writer. The writer
     * waits on them to be encoded in the order they were received, and the encoder processes them in parallel.
     * One batch might take a long time to process, so make the queues very big to avoid idle encoding CPUs.
     */
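    // resulting topology (stage and queue names match the code below):
    //   read/merge -> reader_queue -> encode (processThreads threads)
    //            \--> archive_writer_queue -> write (1 thread)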
    WorkQueue<TileBatch> writerQueue = new WorkQueue<>("archive_writer_queue", queueSize, 1, stats);
    encodeBranch = pipeline
      .<TileBatch>fromGenerator(secondStageName, next -> {
        var writerEnqueuer = writerQueue.threadLocalWriter();
        writer.readFeaturesAndBatch(batch -> {
          next.accept(batch);
          writerEnqueuer.accept(batch); // also send immediately to writer
        });
        writerQueue.close();
        // use only 1 thread since readFeaturesAndBatch needs to be single-threaded
      }, 1)
      .addBuffer("reader_queue", queueSize)
      .sinkTo("encode", processThreads, writer::tileEncoderSink);
    // the tile writer will wait on the result of each batch to ensure tiles are written in order
    writeBranch = pipeline.readFromQueue(writerQueue)
      // use only 1 thread since tileWriter needs to be single-threaded
      .sinkTo("write", 1, writer::tileWriter);

    var loggers = ProgressLoggers.create()
      .addRatePercentCounter("features", features.numFeaturesWritten(), writer.featuresProcessed, true)
      .addFileSize(features)
      .addRateCounter("tiles", writer::tilesEmitted)
      .addFileSize(fileSize)
      .newLine()
      .addProcessStats()
      .newLine();
    if (readWorker != null) {
      loggers.addThreadPoolStats("read", readWorker);
    }
    loggers.addPipelineStats(encodeBranch)
      .addPipelineStats(writeBranch)
      .newLine()
      .add(writer::getLastTileLogDetails);

    var doneFuture = writeBranch == null ? encodeBranch.done() : joinFutures(writeBranch.done(), encodeBranch.done());
    loggers.awaitAndLog(doneFuture, config.logInterval());
    writer.printTileStats();
    timer.stop();
  }

  private String getLastTileLogDetails() {
    TileCoord lastTile = lastTileWritten.get();
    String blurb;
    if (lastTile == null) {
      blurb = "n/a";
    } else {
      blurb = "%d/%d/%d (z%d %s) %s".formatted(
        lastTile.z(), lastTile.x(), lastTile.y(),
        lastTile.z(),
        Format.defaultInstance().percent(archive.tileOrder().progressOnLevel(lastTile, config.bounds().tileExtents())),
        lastTile.getDebugUrl()
      );
    }
    return "last tile: " + blurb;
  }
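
  /**
   * Reads tile features from {@link #inputTiles} in order and groups them into {@link TileBatch}es, flushing a batch
   * once it reaches {@link #MAX_TILES_PER_BATCH} tiles or {@link #MAX_FEATURES_PER_BATCH} features.
   */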
  private void readFeaturesAndBatch(Consumer<TileBatch> next) {
    int currentZoom = Integer.MIN_VALUE;
    TileBatch batch = new TileBatch();
    long featuresInThisBatch = 0;
    long tilesInThisBatch = 0;
    for (var feature : inputTiles) {
      int z = feature.tileCoord().z();
      if (z != currentZoom) {
        LOGGER.trace("Starting z{}", z);
        currentZoom = z;
      }
      long thisTileFeatures = feature.getNumFeaturesToEmit();
      if (tilesInThisBatch > 0 &&
        (tilesInThisBatch >= MAX_TILES_PER_BATCH ||
          ((featuresInThisBatch + thisTileFeatures) > MAX_FEATURES_PER_BATCH))) {
        next.accept(batch);
        batch = new TileBatch();
        featuresInThisBatch = 0;
        tilesInThisBatch = 0;
      }
      featuresInThisBatch += thisTileFeatures;
      tilesInThisBatch++;
      batch.in.add(feature);
    }
    if (!batch.in.isEmpty()) {
      next.accept(batch);
    }
  }

  private void tileEncoderSink(Iterable<TileBatch> prev) throws IOException {
    tileEncoder(prev, batch -> {
      // no next step
    });
  }
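
  /**
   * Encodes and gzips the tiles in each batch, reusing the previous result when consecutive tiles have identical
   * contents, then completes {@code batch.out} so {@link #tileWriter(Iterable)} can write the tiles in order.
   */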
  private void tileEncoder(Iterable<TileBatch> prev, Consumer<TileBatch> next) throws IOException {
    /*
     * To optimize emitting many identical consecutive tiles (like large ocean areas), memoize output to avoid
     * recomputing if the input hasn't changed.
     */
    byte[] lastBytes = null, lastEncoded = null;
    Long lastTileDataHash = null;
    boolean lastIsFill = false;
    boolean compactDb = config.compactDb();
    boolean skipFilled = config.skipFilledTiles();

    for (TileBatch batch : prev) {
      Queue<TileEncodingResult> result = new ArrayDeque<>(batch.size());
      FeatureGroup.TileFeatures last = null;
      // each batch contains tiles ordered by z asc, x asc, y desc
2021-07-29 01:47:13 +00:00
for (int i = 0; i < batch.in.size(); i++) {
FeatureGroup.TileFeatures tileFeatures = batch.in.get(i);
2021-07-29 11:07:58 +00:00
featuresProcessed.incBy(tileFeatures.getNumFeaturesProcessed());
2021-07-26 00:49:58 +00:00
byte[] bytes, encoded;
2022-06-04 00:44:49 +00:00
Long tileDataHash;
2021-07-26 00:49:58 +00:00
if (tileFeatures.hasSameContents(last)) {
bytes = lastBytes;
encoded = lastEncoded;
tileDataHash = lastTileDataHash;
2021-07-26 00:49:58 +00:00
memoizedTiles.inc();
} else {
          VectorTile en = tileFeatures.getVectorTileEncoder();
          if (skipFilled && (lastIsFill = en.containsOnlyFills())) {
            encoded = null;
            bytes = null;
          } else {
            encoded = en.encode();
            bytes = gzip(encoded);
            if (encoded.length > config.tileWarningSizeBytes()) {
              LOGGER.warn("{} {}kb uncompressed",
                tileFeatures.tileCoord(),
                encoded.length / 1024);
            }
          }
          lastEncoded = encoded;
          lastBytes = bytes;
          last = tileFeatures;
          if (compactDb && en.likelyToBeDuplicated() && bytes != null) {
            tileDataHash = generateContentHash(bytes);
          } else {
            tileDataHash = null;
          }
          lastTileDataHash = tileDataHash;
        }
        if (skipFilled && lastIsFill) {
          continue;
        }
        int zoom = tileFeatures.tileCoord().z();
        int encodedLength = encoded == null ? 0 : encoded.length;
        totalTileSizesByZoom[zoom].incBy(encodedLength);
        maxTileSizesByZoom[zoom].accumulate(encodedLength);
        result.add(
          new TileEncodingResult(tileFeatures.tileCoord(), bytes,
            tileDataHash == null ? OptionalLong.empty() : OptionalLong.of(tileDataHash))
        );
      }
      // hand result off to writer
      batch.out.complete(result);
      next.accept(batch);
    }
  }
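
  /**
   * Single-threaded pipeline sink that waits for each batch to finish encoding and writes the resulting tiles to the
   * archive in tile order.
   */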
  private void tileWriter(Iterable<TileBatch> tileBatches) throws ExecutionException, InterruptedException {
    archive.initialize(config, tileArchiveMetadata, layerStats);
    TileCoord lastTile = null;
    Timer time = null;
    int currentZ = Integer.MIN_VALUE;
    try (var tileWriter = archive.newTileWriter()) {
      for (TileBatch batch : tileBatches) {
        Queue<TileEncodingResult> encodedTiles = batch.out.get();
        TileEncodingResult encodedTile;
        while ((encodedTile = encodedTiles.poll()) != null) {
          TileCoord tileCoord = encodedTile.coord();
          assert lastTile == null || lastTile.compareTo(tileCoord) < 0 : "Tiles out of order %s before %s"
            .formatted(lastTile, tileCoord);
          lastTile = encodedTile.coord();
          int z = tileCoord.z();
          if (z != currentZ) {
            if (time == null) {
              LOGGER.info("Starting z{}", z);
            } else {
              LOGGER.info("Finished z{} in {}, now starting z{}", currentZ, time.stop(), z);
            }
            time = Timer.start();
            currentZ = z;
          }
          tileWriter.write(encodedTile);
          stats.wroteTile(z, encodedTile.tileData() == null ? 0 : encodedTile.tileData().length);
          tilesByZoom[z].inc();
        }
        lastTileWritten.set(lastTile);
      }
      tileWriter.printStats();
    }

    if (time != null) {
      LOGGER.info("Finished z{} in {}", currentZ, time.stop());
    }
    archive.finish(config);
  }
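
  /** Logs per-zoom tile counts plus average and maximum tile sizes at debug level. */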
  private void printTileStats() {
    if (LOGGER.isDebugEnabled()) {
      Format format = Format.defaultInstance();
      LOGGER.debug("Tile stats:");
      long sumSize = 0;
      long sumCount = 0;
      long maxMax = 0;
      for (int z = config.minzoom(); z <= config.maxzoom(); z++) {
        long totalCount = tilesByZoom[z].get();
        long totalSize = totalTileSizesByZoom[z].get();
        sumSize += totalSize;
        sumCount += totalCount;
        long maxSize = maxTileSizesByZoom[z].get();
        maxMax = Math.max(maxMax, maxSize);
        LOGGER.debug("z{} avg:{} max:{}",
          z,
          format.storage(totalCount == 0 ? 0 : (totalSize / totalCount), false),
          format.storage(maxSize, false));
      }
      LOGGER.debug("all avg:{} max:{}",
        format.storage(sumCount == 0 ? 0 : (sumSize / sumCount), false),
        format.storage(maxMax, false));
      LOGGER.debug(" # features: {}", format.integer(featuresProcessed.get()));
      LOGGER.debug(" # tiles: {}", format.integer(this.tilesEmitted()));
    }
  }

  private long tilesEmitted() {
    return Stream.of(tilesByZoom).mapToLong(c -> c.get()).sum();
  }

  /**
   * Generates a hash over the encoded and compressed tile data.
   * <p>
   * Used as an optimization to avoid writing the same (mostly ocean) tiles over and over again.
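   * <p>
   * For example, {@link #tileEncoder(Iterable, Consumer)} calls {@code generateContentHash(bytes)} on the gzipped
   * tile bytes and passes the result along in the {@link TileEncodingResult} so the archive can detect duplicate
   * tiles.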
   */
  public static long generateContentHash(byte[] bytes) {
    return Hashing.fnv1a64(bytes);
  }

  /**
   * Container for a batch of tiles to be processed together in the encoder and writer threads.
   * <p>
   * The cost of encoding a tile may vary dramatically by its size (depending on the profile) so batches are sized
   * dynamically to put as little as 1 large tile, or as many as 10,000 small tiles in a batch to keep encoding threads
   * busy.
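   * <p>
   * For example, since a batch always keeps at least one tile, a single tile with more than
   * {@code MAX_FEATURES_PER_BATCH} (10,000) features ends up in a batch by itself, while tiny tiles are grouped up to
   * {@code MAX_TILES_PER_BATCH} (1,000) per batch.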
   *
   * @param in  the tile data to encode
   * @param out the future that the encoder thread completes to hand finished tiles off to the writer thread
   */
  private record TileBatch(
    List<FeatureGroup.TileFeatures> in,
    CompletableFuture<Queue<TileEncodingResult>> out
  ) {

    TileBatch() {
      this(new ArrayList<>(), new CompletableFuture<>());
    }

    public int size() {
      return in.size();
    }

    public boolean isEmpty() {
      return in.isEmpty();
    }
  }
}