Mirror of https://github.com/bellingcat/auto-archiver
commit aaca6efac1
@@ -10,6 +10,8 @@ If you are using `pipenv` (recommended), `pipenv install` is sufficient to insta
 
 [ffmpeg](https://www.ffmpeg.org/) must also be installed locally for this tool to work.
 
+[firefox](https://www.mozilla.org/en-US/firefox/new/) and [geckodriver](https://github.com/mozilla/geckodriver/releases) must also be installed, with geckodriver placed on a PATH folder like `/usr/local/bin`.
+
 A `.env` file is required for saving content to a Digital Ocean space, and for archiving pages to the Internet Archive. This file should also be in the script directory, and should contain the following variables:
 
 ```
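The README block listing the actual variables is truncated by this hunk, so a minimal sketch of how the script consumes the `.env` file may help; the `load_dotenv` import is visible in the `auto_archive.py` hunk further down, but the variable name used here is a placeholder, not the project's real key:

```python
# Minimal sketch of loading the .env file with python-dotenv (the import is
# confirmed by the auto_archive.py hunk below). DO_SPACES_KEY is a
# hypothetical variable name for illustration only; the real list lives in
# the README block this hunk cuts off.
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

do_key = os.getenv('DO_SPACES_KEY')  # hypothetical name
if do_key is None:
    raise SystemExit('missing credentials: did you create the .env file?')
```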
@@ -82,6 +84,7 @@ graph TD
 A -->|parent of| C(TikTokArchiver)
 A -->|parent of| D(YoutubeDLArchiver)
 A -->|parent of| E(WaybackArchiver)
+A -->|parent of| F(TwitterArchiver)
 ```
 
 ### Current Storages
 ```mermaid
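The diagram update registers `TwitterArchiver` as another child of the common `Archiver` base. A hedged sketch of the relation the graph describes; the `download()` signature is an assumption inferred from the `auto_archive.py` hunk below, not a verbatim copy of the project's base class:

```python
# Sketch of the parent/child relation in the mermaid graph: every concrete
# archiver subclasses a shared Archiver base and overrides download().
class Archiver:
    name = 'base'

    def download(self, url, check_if_exists=False):
        raise NotImplementedError


class TwitterArchiver(Archiver):  # the new node F in the graph
    name = 'twitter'

    def download(self, url, check_if_exists=False):
        # a real implementation would fetch and store the tweet's media
        return None
```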
@@ -43,8 +43,10 @@ class TelegramArchiver(Archiver):
             images += urls
 
             page_cdn, page_hash, thumbnail = self.generate_media_page(images, url, html.escape(str(t.content)))
+            time_elements = s.find_all('time')
+            timestamp = time_elements[0].get('datetime') if len(time_elements) else None
 
-            return ArchiveResult(status="success", cdn_url=page_cdn, screenshot=screenshot, hash=page_hash, thumbnail=thumbnail, timestamp=s.find_all('time')[0].get('datetime'))
+            return ArchiveResult(status="success", cdn_url=page_cdn, screenshot=screenshot, hash=page_hash, thumbnail=thumbnail, timestamp=timestamp)
 
         video_url = video.get('src')
         video_id = video_url.split('/')[-1].split('?')[0]
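The old one-liner indexed `s.find_all('time')[0]` directly, so any Telegram post without a `<time>` element raised an `IndexError`; the new code falls back to `None`. A standalone illustration of the guarded lookup:

```python
# Standalone illustration of the fix: find_all('time') returns a list,
# which may be empty, so the [0] index must be guarded.
from bs4 import BeautifulSoup

def extract_timestamp(html_doc: str):
    s = BeautifulSoup(html_doc, 'html.parser')
    time_elements = s.find_all('time')
    return time_elements[0].get('datetime') if len(time_elements) else None

print(extract_timestamp('<time datetime="2022-03-01T10:00:00Z"></time>'))  # 2022-03-01T10:00:00Z
print(extract_timestamp('<p>no timestamp here</p>'))                       # None
```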
@@ -7,6 +7,7 @@ import gspread
 from loguru import logger
 from dotenv import load_dotenv
 from selenium import webdriver
+import traceback
 
 import archivers
 from storages import S3Storage, S3Config
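The new import supports the richer error log added later in this commit: `str(e)` alone loses the stack trace, while `traceback.format_exc()` captures the full traceback of the exception currently being handled.

```python
# Why the import matters: {e} in an f-string prints only the message,
# while traceback.format_exc() includes the full stack trace.
import traceback

def risky():
    return 1 / 0

try:
    risky()
except Exception as e:
    print(f'error: {e}')           # just "division by zero"
    print(traceback.format_exc())  # full traceback, including risky()
```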
@@ -60,7 +61,7 @@ def expand_url(url):
     return url
 
 
-def process_sheet(sheet, header=1):
+def process_sheet(sheet, header=1, columns=GWorksheet.COLUMN_NAMES):
     gc = gspread.service_account(filename='service_account.json')
     sh = gc.open(sheet)
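The new `columns` keyword lets callers remap the spreadsheet header names the archiver looks for, while the default (`GWorksheet.COLUMN_NAMES`) keeps existing callers working. A hypothetical call, showing only the keys visible in this diff (`COLUMN_NAMES` contains more entries, truncated below, which a real override would also need):

```python
# Hypothetical invocation; the sheet name and header texts are examples.
process_sheet('Incident tracker', header=2, columns={
    'url': 'media url',            # internal key -> header text in the sheet
    'status': 'archive status',
    'archive': 'archive location',
})
```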
@@ -78,17 +79,17 @@ def process_sheet(sheet, header=1):
 
     # loop through worksheets to check
     for ii, wks in enumerate(sh.worksheets()):
-        logger.info(f'Opening worksheet {ii}: "{wks.title}"')
-        gw = GWorksheet(wks, header_row=header)
+        logger.info(f'Opening worksheet {ii}: "{wks.title}" header={header}')
+        gw = GWorksheet(wks, header_row=header, columns=columns)
 
         if not gw.col_exists('url'):
             logger.warning(
-                f'No "Media URL" column found, skipping worksheet {wks.title}')
+                f'No "{columns["url"]}" column found, skipping worksheet {wks.title}')
             continue
 
         if not gw.col_exists('status'):
             logger.warning(
-                f'No "Archive status" column found, skipping worksheet {wks.title}')
+                f'No "{columns["status"]}" column found, skipping worksheet {wks.title}')
             continue
 
         # archives will be in a folder 'doc_name/worksheet_name'
@@ -104,62 +105,60 @@ def process_sheet(sheet, header=1):
             archivers.WaybackArchiver(s3_client, driver)
         ]
 
-        values = gw.get_values()
         # loop through rows in worksheet
         for row in range(1 + header, gw.count_rows() + 1):
-            row_values = values[row-1]
-            url = gw.get_cell(row_values, 'url')
-            status = gw.get_cell(row_values, 'status')
+            url = gw.get_cell(row, 'url')
+            original_status = gw.get_cell(row, 'status')
+            status = gw.get_cell(row, 'status', fresh=original_status in ['', None])
             if url != '' and status in ['', None]:
-                url = gw.get_cell(row, 'url')
-                status = gw.get_cell(status, 'status')
-
-                if url != '' and status in ['', None]:
-                    gw.set_cell(row, 'status', 'Archive in progress')
-
-                    url = expand_url(url)
-
-                    for archiver in active_archivers:
-                        logger.debug(f'Trying {archiver} on row {row}')
-
-                        try:
-                            result = archiver.download(url, check_if_exists=True)
-                        except Exception as e:
-                            result = False
-                            logger.error(
-                                f'Got unexpected error in row {row} with archiver {archiver} for url {url}: {e}')
-
-                        if result:
-                            if result.status in ['success', 'already archived']:
-                                result.status = archiver.name + \
-                                    ": " + str(result.status)
-                                logger.success(
-                                    f'{archiver} succeeded on row {row}')
-                                break
-                            logger.warning(
-                                f'{archiver} did not succeed on row {row}, final status: {result.status}')
-                            result.status = archiver.name + \
-                                ": " + str(result.status)
-
-                    if result:
-                        update_sheet(gw, row, result)
-                    else:
-                        gw.set_cell(row, 'status', 'failed: no archiver')
+                gw.set_cell(row, 'status', 'Archive in progress')
+
+                url = expand_url(url)
+
+                for archiver in active_archivers:
+                    logger.debug(f'Trying {archiver} on row {row}')
+
+                    try:
+                        result = archiver.download(url, check_if_exists=True)
+                    except Exception as e:
+                        result = False
+                        logger.error(f'Got unexpected error in row {row} with archiver {archiver} for url {url}: {e}\n{traceback.format_exc()}')
+
+                    if result:
+                        if result.status in ['success', 'already archived']:
+                            result.status = archiver.name + \
+                                ": " + str(result.status)
+                            logger.success(
+                                f'{archiver} succeeded on row {row}')
+                            break
+                        logger.warning(
+                            f'{archiver} did not succeed on row {row}, final status: {result.status}')
+                        result.status = archiver.name + \
+                            ": " + str(result.status)
+
+                if result:
+                    update_sheet(gw, row, result)
+                else:
+                    gw.set_cell(row, 'status', 'failed: no archiver')
+
+        logger.success(f'Finished worksheet {wks.title}')
     driver.quit()
 
 
 def main():
     parser = argparse.ArgumentParser(
         description='Automatically archive social media videos from a Google Sheets document')
-    parser.add_argument('--sheet', action='store', dest='sheet')
-    parser.add_argument('--header', action='store', dest='header', default=1, type=int)
-    args = parser.parse_args()
+    parser.add_argument('--sheet', action='store', dest='sheet', help='the name of the google sheets document', required=True)
+    parser.add_argument('--header', action='store', dest='header', default=1, type=int, help='1-based index for the header row')
+    for k, v in GWorksheet.COLUMN_NAMES.items():
+        parser.add_argument(f'--col-{k}', action='store', dest=k, default=v, help=f'the name of the column to fill with {k} (defaults={v})')
 
-    logger.info(f'Opening document {args.sheet}')
+    args = parser.parse_args()
+    config_columns = {k: getattr(args, k).lower() for k in GWorksheet.COLUMN_NAMES.keys()}
+
+    logger.info(f'Opening document {args.sheet} for header {args.header}')
 
     mkdir_if_not_exists('tmp')
-    process_sheet(args.sheet, header=args.header)
+    process_sheet(args.sheet, header=args.header, columns=config_columns)
     shutil.rmtree('tmp')
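The `main()` change generates one `--col-<key>` flag per `COLUMN_NAMES` entry, so users can point the archiver at their own header names without editing code. A self-contained sketch of that pattern; the `COLUMN_NAMES` values here are illustrative, since the diff only shows two of the real entries:

```python
# Dynamically generated CLI flags, one per column key. Values below are
# illustrative stand-ins for the project's defaults.
import argparse

COLUMN_NAMES = {'url': 'link', 'status': 'archive status', 'archive': 'archive location'}

parser = argparse.ArgumentParser()
parser.add_argument('--sheet', required=True)
for k, v in COLUMN_NAMES.items():
    parser.add_argument(f'--col-{k}', action='store', dest=k, default=v)

# equivalent shell call: python auto_archive.py --sheet "My Sheet" --col-url "Media URL"
args = parser.parse_args(['--sheet', 'My Sheet', '--col-url', 'Media URL'])
config_columns = {k: getattr(args, k).lower() for k in COLUMN_NAMES}
print(config_columns)  # {'url': 'media url', 'status': 'archive status', 'archive': 'archive location'}
```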
@@ -45,5 +45,5 @@ class S3Storage(Storage):
         return False
 
     def uploadf(self, file, key, **kwargs):
-        extra_args = kwargs["extra_args"] if "extra_args" in kwargs else {'ACL': 'public-read'}
+        extra_args = kwargs.get("extra_args", {'ACL': 'public-read'})
         self.s3.upload_fileobj(file, Bucket=self.bucket, Key=self._get_path(key), ExtraArgs=extra_args)
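This is a pure simplification: `dict.get(key, default)` behaves identically to the conditional expression, returning the default only when the key is absent.

```python
# Behavioral equivalence of the two forms in the S3Storage hunk:
kwargs = {}
old = kwargs["extra_args"] if "extra_args" in kwargs else {'ACL': 'public-read'}
new = kwargs.get("extra_args", {'ACL': 'public-read'})
assert old == new == {'ACL': 'public-read'}

kwargs = {"extra_args": {'ACL': 'private'}}
assert kwargs.get("extra_args", {'ACL': 'public-read'}) == {'ACL': 'private'}
```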
@@ -2,6 +2,12 @@ from gspread import utils
 
 
 class GWorksheet:
+    """
+    This class makes read/write operations to a worksheet easier.
+    It can read the headers from a custom row number, but the row references
+    should always include the offset of the header.
+    eg: if header=4, row 5 will be the first with data.
+    """
     COLUMN_NAMES = {
         'url': 'link',
         'archive': 'archive location',
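The new docstring pins down the row-addressing convention: row numbers stay 1-based relative to the sheet itself, not to the header. A small sketch of what that means in practice, assuming `wks` is a gspread worksheet object:

```python
# With header_row=4, the headers live on sheet row 4 and row 5 is the
# first data row -- callers always pass sheet-absolute row numbers.
gw = GWorksheet(wks, header_row=4)
first_data_row = 4 + 1  # header offset included
url = gw.get_cell(first_data_row, 'url')
```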
@@ -18,7 +24,8 @@ class GWorksheet:
     def __init__(self, worksheet, columns=COLUMN_NAMES, header_row=1):
         self.wks = worksheet
-        self.headers = [v.lower() for v in self.wks.row_values(header_row)]
+        self.values = self.wks.get_values()
+        self.headers = [v.lower() for v in self.values[header_row - 1]]
         self.columns = columns
 
     def _check_col_exists(self, col: str):
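This change fetches the whole sheet once at construction time and caches it, so the read methods rewritten in the next hunk hit memory instead of issuing a Sheets API request per row. The pattern in miniature:

```python
# One batch read up front, then in-memory lookups. Before this change each
# get_row() was a separate wks.row_values() API request.
class CachedSheet:
    def __init__(self, wks, header_row=1):
        self.wks = wks
        self.values = wks.get_values()  # single API call
        self.headers = [v.lower() for v in self.values[header_row - 1]]

    def get_row(self, row: int):
        return self.values[row - 1]     # no network round-trip
```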
@@ -34,25 +41,29 @@ class GWorksheet:
         return self.columns[col] in self.headers
 
     def count_rows(self):
-        return len(self.wks.get_values())
+        return len(self.values)
 
     def get_row(self, row: int):
         # row is 1-based
-        return self.wks.row_values(row)
+        return self.values[row - 1]
 
     def get_values(self):
-        return self.wks.get_values()
+        return self.values
 
-    def get_cell(self, row, col: str):
+    def get_cell(self, row, col: str, fresh=False):
         """
         returns the cell value from (row, col),
         where row can be an index (1-based) OR list of values
         as received from self.get_row(row)
+        if fresh=True, the sheet is queried again for this cell
         """
+        col_index = self._col_index(col)
+
+        if fresh:
+            return self.wks.cell(row, col_index + 1).value
         if type(row) == int:
             row = self.get_row(row)
 
-        col_index = self._col_index(col)
         if col_index >= len(row):
             return ''
         return row[col_index]
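`fresh=True` deliberately bypasses the new cache and re-queries a single cell from the live sheet (`wks.cell` is 1-based in gspread, hence `col_index + 1`). The `process_sheet` hunk above uses it to double-check an apparently empty status right before claiming a row, which narrows (but does not eliminate) the race window between two concurrent archiver runs:

```python
# How process_sheet uses the new flag: trust the cached snapshot unless
# the status looks empty, then re-read just that cell before claiming
# the row.
original_status = gw.get_cell(row, 'status')                # cached read
status = gw.get_cell(row, 'status',
                     fresh=original_status in ['', None])   # live re-read if empty
if url != '' and status in ['', None]:
    gw.set_cell(row, 'status', 'Archive in progress')       # claim the row
```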