Mirror of https://github.com/onthegomap/planetiler
tweaks for running on large areas
parent 2526b5b84f
commit d89e2731ee
@@ -13,6 +13,7 @@ public record CommonParams(
     int maxzoom,
     boolean deferIndexCreation,
     boolean optimizeDb,
+    boolean emitTilesInOrder,
     boolean forceOverwrite,
     boolean gzipTempStorage,
     String longLongMap,
@@ -30,6 +31,7 @@ public record CommonParams(
     int maxzoom,
     boolean deferIndexCreation,
     boolean optimizeDb,
+    boolean emitTilesInOrder,
     boolean forceOverwrite,
     boolean gzipTempStorage,
     String longLongMap
@@ -42,6 +44,7 @@ public record CommonParams(
       maxzoom,
       deferIndexCreation,
       optimizeDb,
+      emitTilesInOrder,
       forceOverwrite,
       gzipTempStorage,
       longLongMap,
@@ -84,6 +87,7 @@ public record CommonParams(
       arguments.integer("maxzoom", "maximum zoom level (limit 14)", MAX_MAXZOOM),
       arguments.get("defer_mbtiles_index_creation", "add index to mbtiles file after finished writing", false),
      arguments.get("optimize_db", "optimize mbtiles after writing", false),
+      arguments.get("emit_tiles_in_order", "emit tiles in index order", false),
       arguments.get("force", "force overwriting output file", false),
       arguments.get("gzip_temp", "gzip temporary feature storage (uses more CPU, but less disk space)", false),
       arguments.get("llmap", "type of long long map", "mapdb")
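For context, the new emit_tiles_in_order option follows the same pattern as the existing flags: a boolean read from the command line with a default of false and carried through the CommonParams record. The sketch below is a minimal stand-in showing that wiring end to end; the Arguments and Config types here are hypothetical, not the project's real classes.

import java.util.Map;

public class ArgumentsSketch {

  // hypothetical stand-in for the Arguments helper seen above
  record Arguments(Map<String, String> raw) {
    boolean get(String key, String description, boolean defaultValue) {
      String value = raw.get(key);
      return value == null ? defaultValue : Boolean.parseBoolean(value);
    }
  }

  // hypothetical, trimmed-down stand-in for CommonParams with just the new option
  record Config(boolean emitTilesInOrder) {
    static Config from(Arguments arguments) {
      return new Config(
        arguments.get("emit_tiles_in_order", "emit tiles in index order", false)
      );
    }
  }

  public static void main(String[] args) {
    Config config = Config.from(new Arguments(Map.of("emit_tiles_in_order", "true")));
    System.out.println("emitTilesInOrder=" + config.emitTilesInOrder()); // prints true
  }
}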
@@ -2,6 +2,7 @@ package com.onthegomap.flatmap.geo;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import org.locationtech.jts.geom.Coordinate;
 import org.locationtech.jts.geom.Envelope;
 import org.locationtech.jts.geom.Geometry;
@@ -22,7 +23,21 @@ public class PointIndex<T> {
     return new PointIndex<>();
   }
 
+  private volatile boolean built = false;
+
+  private void build() {
+    if (!built) {
+      synchronized (this) {
+        if (!built) {
+          index.build();
+          built = true;
+        }
+      }
+    }
+  }
+
   public List<T> getWithin(Point point, double threshold) {
+    build();
     Coordinate coord = point.getCoordinate();
     Envelope envelope = point.getEnvelopeInternal();
     envelope.expandBy(threshold);
@@ -41,6 +56,7 @@ public class PointIndex<T> {
   }
 
   public T getNearest(Point point, double threshold) {
+    build();
     Coordinate coord = point.getCoordinate();
     Envelope envelope = point.getEnvelopeInternal();
     envelope.expandBy(threshold);
@@ -62,7 +78,8 @@ public class PointIndex<T> {
 
   public void put(Geometry geom, T item) {
     if (geom instanceof Point point && !point.isEmpty()) {
-      index.insert(point.getEnvelopeInternal(), new GeomWithData<>(point.getCoordinate(), item));
+      Envelope envelope = Objects.requireNonNull(point.getEnvelopeInternal());
+      index.insert(envelope, new GeomWithData<>(point.getCoordinate(), item));
     } else if (geom instanceof GeometryCollection geoms) {
       for (int i = 0; i < geoms.getNumGeometries(); i++) {
         put(geoms.getGeometryN(i), item);
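The build() method added above is classic double-checked locking: a volatile built flag guards a one-time, expensive index build, so the first query triggers it and later concurrent readers skip the lock entirely. A standalone sketch of the same pattern, with illustrative names rather than the real JTS-backed PointIndex:

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class LazyBuiltIndex<T> {
  private final List<T> inserted = new CopyOnWriteArrayList<>();
  private volatile boolean built = false;

  // inserts happen up front, before the first query
  public void put(T item) {
    inserted.add(item);
  }

  private void build() {
    if (!built) {                 // fast path: no locking once built
      synchronized (this) {
        if (!built) {             // re-check under the lock
          // the expensive one-time work goes here, e.g. bulk-loading an R-tree
          built = true;           // volatile write publishes the built state
        }
      }
    }
  }

  public List<T> query() {
    build();                      // every query path ensures the index is built first
    return List.copyOf(inserted);
  }
}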
@@ -99,20 +99,31 @@ public class MbtilesWriter {
 
     var topology = Topology.start("mbtiles", stats);
 
-    int queueSize = 30_000 / BATCH_SIZE;
+    int queueSize = 10_000;
     WorkQueue<TileBatch> writerQueue = new WorkQueue<>("mbtiles_writer_queue", queueSize, 1, stats);
 
-    var encodeBranch = topology
-      .<TileBatch>fromGenerator("reader", next -> writer.readFeatures(batch -> {
-        next.accept(batch);
-        writerQueue.accept(batch); // also send immediately to writer
-      }), 1)
-      .addBuffer("reader_queue", queueSize)
-      .sinkTo("encoder", config.threads(), writer::tileEncoder);
+    Topology<TileBatch> encodeBranch, writeBranch;
+    if (true || config.emitTilesInOrder()) {
+      encodeBranch = topology
+        .<TileBatch>fromGenerator("reader", next -> writer.readFeatures(batch -> {
+          next.accept(batch);
+          writerQueue.accept(batch); // also send immediately to writer
+        }), 1)
+        .addBuffer("reader_queue", queueSize)
+        .sinkTo("encoder", config.threads(), writer::tileEncoder);
 
-    // the tile writer will wait on the result of each batch to ensure tiles are written in order
-    var writeBranch = topology.readFromQueue(writerQueue)
-      .sinkTo("writer", 1, writer::tileWriter);
+      // the tile writer will wait on the result of each batch to ensure tiles are written in order
+      writeBranch = topology.readFromQueue(writerQueue)
+        .sinkTo("writer", 1, writer::tileWriter);
+    } else {
+      // TODO
+      // encodeBranch = topology
+      //   .fromGenerator("reader", writer::readFeatures, 1)
+      //   .addBuffer("reader_queue", queueSize)
+      //   .addWorker("encoder", config.threads(), (prev, next) -> {
+      //     TOO
+      //   })
+    }
 
     var loggers = new ProgressLoggers("mbtiles")
       .addRatePercentCounter("features", features.numFeatures(), writer.featuresProcessed)
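The topology change above keeps tiles in index order while still encoding in parallel: the reader pushes each batch both into the encoder pipeline and straight into the single-threaded writer queue, and the writer waits on each batch's encoded result before writing it. The sketch below reproduces that idea with plain java.util.concurrent types; the Batch record and queue names are hypothetical, not the project's Topology or WorkQueue API.

import java.util.List;
import java.util.concurrent.*;

public class OrderedWriteSketch {

  // a batch of "tiles" plus a future the encoders complete and the writer waits on
  record Batch(List<Integer> tileIds, CompletableFuture<List<String>> encoded) {}

  public static void main(String[] args) throws Exception {
    BlockingQueue<Batch> encodeQueue = new LinkedBlockingQueue<>();
    BlockingQueue<Batch> writeQueue = new LinkedBlockingQueue<>();
    ExecutorService encoders = Executors.newFixedThreadPool(4);

    // "reader": sends every batch to the encoders and immediately to the writer
    for (int b = 0; b < 10; b++) {
      Batch batch = new Batch(List.of(b * 2, b * 2 + 1), new CompletableFuture<>());
      encodeQueue.put(batch);
      writeQueue.put(batch); // write queue preserves read order
    }

    // "encoders": run in parallel and finish in arbitrary order
    for (int i = 0; i < 4; i++) {
      encoders.submit(() -> {
        Batch batch;
        while ((batch = encodeQueue.poll()) != null) {
          batch.encoded().complete(batch.tileIds().stream().map(id -> "tile-" + id).toList());
        }
      });
    }

    // "writer": single thread drains the write queue in order, blocking per batch
    for (int b = 0; b < 10; b++) {
      Batch batch = writeQueue.take();
      System.out.println(batch.encoded().join()); // printed in read order
    }
    encoders.shutdown();
  }
}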
@@ -164,16 +175,24 @@ public class MbtilesWriter {
   void readFeatures(Consumer<TileBatch> next) {
     int currentZoom = Integer.MIN_VALUE;
     TileBatch batch = new TileBatch();
+    long featuresInThisBatch = 0;
+    long tilesInThisBatch = 0;
+    // 249 vs. 24,900
+    long MAX_FEATURES_PER_BATCH = BATCH_SIZE * 100;
     for (var feature : features) {
       int z = feature.coord().z();
       if (z > currentZoom) {
         LOGGER.info("[mbtiles] Starting z" + z);
         currentZoom = z;
       }
-      if (batch.in.size() >= BATCH_SIZE) {
+      if (tilesInThisBatch > BATCH_SIZE || featuresInThisBatch > MAX_FEATURES_PER_BATCH) {
         next.accept(batch);
         batch = new TileBatch();
+        featuresInThisBatch = 0;
+        tilesInThisBatch = 0;
       }
+      featuresInThisBatch++;
+      tilesInThisBatch += feature.getNumFeatures();
       batch.in.offer(feature);
     }
     if (!batch.in.isEmpty()) {
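The readFeatures change above flushes a batch once either of two limits is exceeded instead of only checking batch.in.size(), which keeps batches bounded both by number of tiles and by number of features per batch (the "249 vs. 24,900" comment suggests how far apart those two counts can be). A generic sketch of dual-limit batching, with illustrative names only rather than the real TileBatch:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;
import java.util.function.ToLongFunction;

public class DualLimitBatcher<T> {
  private final long maxItems;
  private final long maxWeight;
  private final ToLongFunction<T> weigher;      // e.g. number of features in a tile
  private final Consumer<List<T>> downstream;   // e.g. hand the batch to the encoder queue
  private List<T> batch = new ArrayList<>();
  private long weight = 0;

  public DualLimitBatcher(long maxItems, long maxWeight, ToLongFunction<T> weigher, Consumer<List<T>> downstream) {
    this.maxItems = maxItems;
    this.maxWeight = maxWeight;
    this.weigher = weigher;
    this.downstream = downstream;
  }

  public void add(T item) {
    // flush before adding if the current batch already exceeds either limit
    if (batch.size() >= maxItems || weight >= maxWeight) {
      flush();
    }
    batch.add(item);
    weight += weigher.applyAsLong(item);
  }

  public void flush() {
    if (!batch.isEmpty()) {
      downstream.accept(batch);
      batch = new ArrayList<>();
      weight = 0;
    }
  }
}

A caller would construct it with per-batch item and weight limits plus a downstream consumer, and call flush() once at the end, just as readFeatures emits the final non-empty batch.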
@@ -4,4 +4,4 @@ set -o errexit
 set -o pipefail
 set -o nounset
 
-mvn -DskipTests=true --projects openmaptiles -am package
+mvn -DskipTests=true --projects openmaptiles -am clean package

@@ -15,7 +15,7 @@ fi
 rsync -avzP openmaptiles/target/flatmap-openmaptiles-0.1-SNAPSHOT-fatjar.jar "${1}":flatmap.jar
 scp scripts/download-other-sources.sh "${1}":download-other-sources.sh
 scp scripts/download-osm.sh "${1}":download-osm.sh
-ssh "${1}" "bash -s" <<EOF
+ssh -t "${1}" "bash -s" <<EOF
 wget -qO - https://adoptopenjdk.jfrog.io/adoptopenjdk/api/gpg/key/public | sudo apt-key add - && \
 add-apt-repository --yes https://adoptopenjdk.jfrog.io/adoptopenjdk/deb/ && \
 apt-get update -y && \