Remove mock-s3 problem child

pull/351/head
lukasmartinelli 2016-06-14 19:47:06 +02:00
parent 62322ce10b
commit 6dbd02b2a6
8 changed files with 49 additions and 33 deletions

View file

@@ -91,7 +91,6 @@ merge-jobs:
     - ./export:/data/export
   links:
     - rabbitmq:rabbitmq
-    - mock-s3:mock-s3
 export-worker:
   image: "osm2vectortiles/export"
   command: ./export-worker.sh
@@ -100,11 +99,11 @@ export-worker:
   links:
     - postgis:db
     - rabbitmq:rabbitmq
-    - mock-s3:mock-s3
   environment:
     AWS_ACCESS_KEY_ID: "${AWS_ACCESS_KEY_ID}"
     AWS_SECRET_ACCESS_KEY: "${AWS_SECRET_ACCESS_KEY}"
-    AWS_REGION: "eu-central-1"
+    AWS_S3_HOST: "${AWS_S3_HOST}"
+    BUCKET_NAME: "osm2vectortiles-testing"
 export:
   image: "osm2vectortiles/export"
   command: ./export-local.sh
@@ -163,12 +162,8 @@ create-extracts:
   volumes:
     - ./export:/data/export
   environment:
-    S3_ACCESS_KEY: "${S3_ACCESS_KEY}"
-    S3_SECRET_KEY: "${S3_SECRET_KEY}"
-mock-s3:
-  image: "ianblenke/mock-s3"
-  ports:
-    - "8080"
+    S3_ACCESS_KEY: "${AWS_ACCESS_KEY_ID}"
+    S3_SECRET_KEY: "${AWS_SECRET_ACCESS_KEY}"
 compare-visual:
   image: "osm2vectortiles/compare-visual"
   ports:

View file

@@ -5,12 +5,11 @@ set -o nounset
 source utils.sh
 readonly QUEUE_NAME=${QUEUE_NAME:-osm2vectortiles_jobs}
 readonly BUCKET_NAME=${BUCKET_NAME:-osm2vectortiles-jobs}
 readonly RABBITMQ_URI=${RABBITMQ_URI:-"amqp://osm:osm@rabbitmq:5672/?blocked_connection_timeout=1200&heartbeat=0"}
 function export_remote_mbtiles() {
-    exec python export_remote.py "$RABBITMQ_URI" \
+    exec python -u export_remote.py "$RABBITMQ_URI" \
         --tm2source="$DEST_PROJECT_DIR" \
         --bucket="$BUCKET_NAME"
 }
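Editor's note on the switch to `python -u`: without the flag, Python block-buffers stdout when it is attached to a pipe (as it is under Docker), so the progress messages this commit adds could lag far behind the actual work or be lost on a crash. A minimal sketch of the same effect achieved in code rather than via the interpreter flag (the `log` helper is hypothetical, not part of this repo):

```python
import sys

def log(message):
    # Write and flush immediately, mirroring what `python -u` does for
    # the whole process: no block-buffered stdout under Docker.
    sys.stdout.write(message + '\n')
    sys.stdout.flush()

log('Render pyramid 284/191 from z9 down to z14')
```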

View file

@@ -42,12 +42,10 @@ def s3_url(host, port, bucket_name, file_name):
 def connect_s3(host, port, bucket_name):
-    # import boto
-    # boto.set_stream_logger('paws')
     is_secure = port == 443
     conn = S3Connection(
-        os.getenv('AWS_ACCESS_KEY_ID', 'dummy'),
-        os.getenv('AWS_SECRET_ACCESS_KEY', 'dummy'),
+        os.environ['AWS_ACCESS_KEY_ID'],
+        os.environ['AWS_SECRET_ACCESS_KEY'],
         is_secure=is_secure,
         port=port,
         host=host,
@@ -106,22 +104,31 @@ def optimize_mbtiles(mbtiles_file, mask_level=8):
     mbtiles = MBTiles(mbtiles_file, 'tms')
     for tile in find_optimizable_tiles(mbtiles, mask_level, 'tms'):
-        tiles = all_descendant_tiles(x=tile.x, y=tile.y, zoom=tile.z, max_zoom=14)
+        tiles = all_descendant_tiles(x=tile.x, y=tile.y,
+                                     zoom=tile.z, max_zoom=14)
         mbtiles.remove_tiles(tiles)
 def export_remote(tm2source, rabbitmq_url, queue_name, result_queue_name,
                   failed_queue_name, render_scheme, bucket_name):
-    host = os.getenv('AWS_S3_HOST', 'mock-s3')
-    port = int(os.getenv('AWS_S3_PORT', 8080))
+    if 'AWS_S3_HOST' not in os.environ:
+        sys.stderr.write('You need to specify the AWS_S3_HOST')
+        sys.exit(1)
+    host = os.environ['AWS_S3_HOST']
+    port = int(os.getenv('AWS_S3_PORT', 443))
+    print('Connect with S3 bucket {} at {}:{}'.format(
+        bucket_name, host, port
+    ))
     bucket = connect_s3(host, port, bucket_name)
     connection = pika.BlockingConnection(pika.URLParameters(rabbitmq_url))
     channel = connection.channel()
     channel.basic_qos(prefetch_count=1)
     channel.confirm_delivery()
     configure_rabbitmq(channel)
+    print('Connect with RabbitMQ server {}'.format(rabbitmq_url))
     def callback(ch, method, properties, body):
         msg = json.loads(body.decode('utf-8'))
@@ -136,6 +143,12 @@ def export_remote(tm2source, rabbitmq_url, queue_name, result_queue_name,
             pyramid = msg['pyramid']
             tileinfo = pyramid['tile']
+            print('Render pyramid {}/{} from z{} down to z{}'.format(
+                tileinfo['x'],
+                tileinfo['y'],
+                tileinfo['min_zoom'],
+                tileinfo['max_zoom'],
+            ))
             tilelive_cmd = render_pyramid_command(
                 source, sink,
                 bounds=create_tilelive_bbox(pyramid['bounds']),
@@ -147,6 +160,9 @@ def export_remote(tm2source, rabbitmq_url, queue_name, result_queue_name,
             with open(list_file, 'w') as fh:
                 write_list_file(fh, msg['tiles'])
+            print('Render {} tiles from list job'.format(
+                len(msg['tiles']),
+            ))
             tilelive_cmd = render_tile_list_command(
                 source, sink,
                 list_file=list_file,
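Editor's note: with mock-s3 gone, `connect_s3()` above must reach a real S3-compatible endpoint. A sketch of how that connection is typically made with boto 2.x, which this code appears to use; the `OrdinaryCallingFormat` (path-style URLs) is an assumption for non-AWS hosts, since some S3-compatible services do not serve bucket-subdomain requests:

```python
import os
from boto.s3.connection import S3Connection, OrdinaryCallingFormat

port = int(os.getenv('AWS_S3_PORT', 443))
conn = S3Connection(
    os.environ['AWS_ACCESS_KEY_ID'],
    os.environ['AWS_SECRET_ACCESS_KEY'],
    host=os.environ['AWS_S3_HOST'],
    port=port,
    is_secure=port == 443,  # same heuristic as connect_s3 above
    calling_format=OrdinaryCallingFormat(),  # path-style URLs (assumption)
)
bucket = conn.get_bucket('osm2vectortiles-testing')
```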

View file

@@ -71,9 +71,11 @@ def merge_results(rabbitmq_url, merge_target, result_queue_name):
     channel = connection.channel()
     channel.basic_qos(prefetch_count=3)
     channel.confirm_delivery()
+    print('Connect with RabbitMQ server {}'.format(rabbitmq_url))
     def callback(ch, method, properties, body):
         msg = json.loads(body.decode('utf-8'))
+        print('Download {}'.format(msg['url']))
         merge_source = download_mbtiles(msg['url'])
         action = functools.partial(merge_mbtiles, merge_source, merge_target)
         diff_size = compare_file_after_action(merge_target, action)

View file

@@ -5,10 +5,10 @@ set -o nounset
 readonly EXPORT_DIR=${EXPORT_DIR:-/data/export}
 readonly MERGE_TARGET=${MERGE_TARGET:-"$EXPORT_DIR/planet.mbtiles"}
-readonly RABBITMQ_URI=${RABBITMQ_URI:-"amqp://osm:osm@rabbitmq:5672/"}
+readonly RABBITMQ_URI=${RABBITMQ_URI:-"amqp://osm:osm@rabbitmq:5672/?blocked_connection_timeout=1200&heartbeat=0"}
 function export_remote_mbtiles() {
-    exec python merge-jobs.py "$RABBITMQ_URI" \
+    exec python -u merge-jobs.py "$RABBITMQ_URI" \
         --merge-target="$MERGE_TARGET"
 }
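Editor's note: the new query parameters on RABBITMQ_URI are consumed by pika's URL parsing. `heartbeat=0` disables AMQP heartbeats entirely, so a worker that renders for hours without touching the socket is not dropped by the broker; `blocked_connection_timeout=1200` aborts if the broker blocks the publisher (for example under a memory alarm) for more than 20 minutes. A minimal sketch of how the parameters reach the connection; exact URL key support depends on the pika version:

```python
import pika

uri = ('amqp://osm:osm@rabbitmq:5672/'
       '?blocked_connection_timeout=1200&heartbeat=0')
# URLParameters parses the query string into connection parameters,
# matching the pika.BlockingConnection(pika.URLParameters(...)) call
# used by export_remote.py and merge-jobs.py in this commit.
params = pika.URLParameters(uri)
connection = pika.BlockingConnection(params)
channel = connection.channel()
```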

View file

@@ -0,0 +1,9 @@
+# integration-test
+
+Integration test for the entire OSM2VectorTiles workflow.
+Its primary purpose is to verify that everything works together,
+not to verify correctness.
+
+Take a look at the `.travis.yml` file to see how it is called.
+The setup requires a working S3 endpoint to test the distributed
+workflow.
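Editor's note: because the test now requires real S3 credentials, a pre-flight check along the lines of the one this commit adds to export_remote.py avoids a confusing failure halfway through. A sketch; the variable list mirrors the environment used in this commit:

```python
# Fail fast when the S3 environment for the distributed workflow is
# missing, mirroring the AWS_S3_HOST check added to export_remote.py.
import os
import sys

REQUIRED = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_S3_HOST']
missing = [name for name in REQUIRED if name not in os.environ]
if missing:
    sys.stderr.write('Missing S3 configuration: {}\n'.format(
        ', '.join(missing)))
    sys.exit(1)
```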

View file

@@ -17,6 +17,9 @@ PROJECT_DIR = os.path.abspath(os.getenv('PROJECT_DIR', PARENT_PROJECT_DIR))
 ALBANIA_BBOX = '19.6875,40.97989806962015,20.390625,41.50857729743933'
 ALBANIA_TIRANA_TILE = (284, 191, 9)
 BUCKET = os.getenv('BUCKET', 'osm2vectortiles-testing')
+AWS_REGION = os.getenv('AWS_S3_HOST', 'os.zhdk.cloud.switch.ch')
 class DockerCompose(object):
     def __init__(self, project_dir=PROJECT_DIR):
@@ -125,9 +128,9 @@ def test_distributed_worker():
     job_zoom = tile_z + 1
     schedule_tile_jobs(tile_x, tile_y, tile_z, job_zoom)
-    dc.run(['-d', '-e', 'BUCKET_NAME={}'.format(BUCKET), 'export-worker'])
     dc.up('merge-jobs')
+    dc.up('export-worker')
-    time.sleep(120)
+    time.sleep(240)
     dc.stop('export-worker')
     dc.stop('merge-jobs')
@@ -136,8 +139,8 @@ def test_distributed_worker():
     # if MBTiles contains all the Albania tiles at job zoom level
     # the export was successful
     exported_mbtiles = os.path.join(PROJECT_DIR, 'export/planet.mbtiles')
-    tiles = find_missing_tiles(exported_mbtiles, tile_x, tile_y, job_zoom, 13)
-    assert tiles == []
+    tiles = find_missing_tiles(exported_mbtiles, tile_x, tile_y, tile_z, 13)
+    assert [t for t in tiles if t.z > tile_z] == []
 @pytest.mark.run(order=7)
@@ -172,10 +175,3 @@ def test_diff_jobs():
     dc.up('merge-jobs')
     time.sleep(10)
     dc.stop('merge-jobs')
-    # Test if the MBTiles is still complete
-    # This does not verify whether new data has been added successfully
-    exported_mbtiles = os.path.join(PROJECT_DIR, 'export/planet.mbtiles')
-    tile_x, tile_y, tile_z = ALBANIA_TIRANA_TILE
-    tiles = find_missing_tiles(exported_mbtiles, tile_x, tile_y, tile_z, 13)
-    assert tiles == []
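Editor's note on the relaxed assertion in test_distributed_worker: `find_missing_tiles` (from mbtoolbox) is now seeded at `tile_z` instead of `job_zoom`, and missing tiles at zoom levels up to and including `tile_z` are tolerated; only zoom levels deeper than the seed tile must be complete, since the scheduled jobs only render its descendants. A sketch of the filtering logic; the `Tile` tuple is an illustrative stand-in, mbtoolbox's actual return type just needs a `z` attribute:

```python
from collections import namedtuple

Tile = namedtuple('Tile', ['x', 'y', 'z'])  # illustrative stand-in

tile_z = 9
# A gap at a coarser zoom level (the parent tile) is tolerated...
missing = [Tile(142, 95, 8)]
# ...because only tiles deeper than tile_z count as real failures.
assert [t for t in missing if t.z > tile_z] == []
```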

View file

@@ -1,4 +1,3 @@
pytest==2.9.1
docker-compose==1.7.1
pytest-ordering==0.4
-e git+https://github.com/lukasmartinelli/mbtoolbox.git@#egg=mbtoolbox