kopia lustrzana https://github.com/OpenDroneMap/NodeODM
Added s3_upload_everything flag
rodzic
91d78d814e
commit
bcbec55c03
|
@ -45,6 +45,7 @@ Options:
|
||||||
--s3_access_key <key> S3 access key, required if --s3_endpoint is set. (default: none)
|
--s3_access_key <key> S3 access key, required if --s3_endpoint is set. (default: none)
|
||||||
--s3_secret_key <secret> S3 secret key, required if --s3_endpoint is set. (default: none)
|
--s3_secret_key <secret> S3 secret key, required if --s3_endpoint is set. (default: none)
|
||||||
--s3_signature_version <version> S3 signature version. (default: 4)
|
--s3_signature_version <version> S3 signature version. (default: 4)
|
||||||
|
--s3_upload_everything Upload all task results to S3. (default: upload only .zip archive and orthophoto)
|
||||||
--max_concurrency <number> Place a cap on the max-concurrency option to use for each task. (default: no limit)
|
--max_concurrency <number> Place a cap on the max-concurrency option to use for each task. (default: no limit)
|
||||||
Log Levels:
|
Log Levels:
|
||||||
error | warn | info | verbose | debug | silly
|
error | warn | info | verbose | debug | silly
|
||||||
|
@ -101,6 +102,7 @@ config.s3Bucket = argv.s3_bucket || fromConfigFile("s3Bucket", "");
|
||||||
config.s3AccessKey = argv.s3_access_key || fromConfigFile("s3AccessKey", process.env.AWS_ACCESS_KEY_ID || "")
|
config.s3AccessKey = argv.s3_access_key || fromConfigFile("s3AccessKey", process.env.AWS_ACCESS_KEY_ID || "")
|
||||||
config.s3SecretKey = argv.s3_secret_key || fromConfigFile("s3SecretKey", process.env.AWS_SECRET_ACCESS_KEY || "")
|
config.s3SecretKey = argv.s3_secret_key || fromConfigFile("s3SecretKey", process.env.AWS_SECRET_ACCESS_KEY || "")
|
||||||
config.s3SignatureVersion = argv.s3_signature_version || fromConfigFile("s3SignatureVersion", "4")
|
config.s3SignatureVersion = argv.s3_signature_version || fromConfigFile("s3SignatureVersion", "4")
|
||||||
|
config.s3UploadEverything = argv.s3_upload_everything || fromConfigFile("s3UploadEverything", false);
|
||||||
config.maxConcurrency = parseInt(argv.max_concurrency || fromConfigFile("maxConcurrency", 0));
|
config.maxConcurrency = parseInt(argv.max_concurrency || fromConfigFile("maxConcurrency", 0));
|
||||||
|
|
||||||
module.exports = config;
|
module.exports = config;
|
||||||
|
|
|
@ -66,7 +66,7 @@ module.exports = {
|
||||||
uploadPaths: function(srcFolder, bucket, dstFolder, paths, cb, onOutput){
|
uploadPaths: function(srcFolder, bucket, dstFolder, paths, cb, onOutput){
|
||||||
if (!s3) throw new Error("S3 is not initialized");
|
if (!s3) throw new Error("S3 is not initialized");
|
||||||
|
|
||||||
const PARALLEL_UPLOADS = 10;
|
const PARALLEL_UPLOADS = 5;
|
||||||
|
|
||||||
const q = async.queue((file, done) => {
|
const q = async.queue((file, done) => {
|
||||||
logger.debug(`Uploading ${file.src} --> ${file.dest}`);
|
logger.debug(`Uploading ${file.src} --> ${file.dest}`);
|
||||||
|
|
11
libs/Task.js
11
libs/Task.js
|
@ -342,9 +342,14 @@ module.exports = class Task{
|
||||||
// Upload to S3 all paths + all.zip file (if config says so)
|
// Upload to S3 all paths + all.zip file (if config says so)
|
||||||
if (S3.enabled()){
|
if (S3.enabled()){
|
||||||
tasks.push((done) => {
|
tasks.push((done) => {
|
||||||
const s3Paths = !config.test ?
|
let s3Paths;
|
||||||
['all.zip'].concat(allPaths) :
|
if (config.test){
|
||||||
['all.zip']; // During testing only upload all.zip
|
s3Paths = ['all.zip']; // During testing only upload all.zip
|
||||||
|
}else if (config.s3UploadEverything){
|
||||||
|
s3Paths = ['all.zip'].concat(allPaths)
|
||||||
|
}else{
|
||||||
|
s3Paths = ['all.zip', 'odm_orthophoto/odm_orthophoto.tif'];
|
||||||
|
}
|
||||||
|
|
||||||
S3.uploadPaths(this.getProjectFolderPath(), config.s3Bucket, this.uuid, s3Paths,
|
S3.uploadPaths(this.getProjectFolderPath(), config.s3Bucket, this.uuid, s3Paths,
|
||||||
err => {
|
err => {
|
||||||
|
|
Ładowanie…
Reference in New Issue