Merge pull request #19 from bellingcat/screenshots

Merge feature branch
pull/25/head
Logan Williams 2022-03-14 09:51:57 +01:00 committed by GitHub
commit aaca6efac1
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 66 additions and 51 deletions

View file

@@ -10,6 +10,8 @@ If you are using `pipenv` (recommended), `pipenv install` is sufficient to insta
[ffmpeg](https://www.ffmpeg.org/) must also be installed locally for this tool to work.
+[firefox](https://www.mozilla.org/en-US/firefox/new/) and [geckodriver](https://github.com/mozilla/geckodriver/releases) must also be installed, with geckodriver placed in a PATH folder such as `/usr/local/bin`.
A `.env` file is required for saving content to a Digital Ocean space, and for archiving pages to the Internet Archive. This file should also be in the script directory, and should contain the following variables:
```
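A minimal sketch of how such a file is consumed via python-dotenv (which the script already imports); the `EXAMPLE_*` variable names below are placeholders, not the tool's actual configuration keys:

```python
# Minimal sketch of consuming a .env file via python-dotenv; the
# EXAMPLE_* names are placeholders, not the tool's actual keys.
import os
from dotenv import load_dotenv

load_dotenv()  # reads KEY=value pairs from ./.env into os.environ

spaces_key = os.getenv('EXAMPLE_DO_SPACES_KEY')  # hypothetical variable
ia_token = os.getenv('EXAMPLE_IA_ACCESS_TOKEN')  # hypothetical variable
```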
@@ -82,6 +84,7 @@ graph TD
A -->|parent of| C(TikTokArchiver)
A -->|parent of| D(YoutubeDLArchiver)
A -->|parent of| E(WaybackArchiver)
+A -->|parent of| F(TwitterArchiver)
```
### Current Storages
```mermaid

View file

@@ -43,8 +43,10 @@ class TelegramArchiver(Archiver):
            images += urls
            page_cdn, page_hash, thumbnail = self.generate_media_page(images, url, html.escape(str(t.content)))
-            return ArchiveResult(status="success", cdn_url=page_cdn, screenshot=screenshot, hash=page_hash, thumbnail=thumbnail, timestamp=s.find_all('time')[0].get('datetime'))
+            time_elements = s.find_all('time')
+            timestamp = time_elements[0].get('datetime') if len(time_elements) else None
+            return ArchiveResult(status="success", cdn_url=page_cdn, screenshot=screenshot, hash=page_hash, thumbnail=thumbnail, timestamp=timestamp)

        video_url = video.get('src')
        video_id = video_url.split('/')[-1].split('?')[0]
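The new lines guard against Telegram posts with no `<time>` element: instead of indexing the result of `find_all` directly, the timestamp falls back to `None`. A standalone sketch of the same pattern with BeautifulSoup:

```python
# Standalone sketch of the guarded timestamp lookup; `html_text` stands
# in for a fetched Telegram post page.
from bs4 import BeautifulSoup

html_text = '<article>a post with no time tag</article>'
s = BeautifulSoup(html_text, 'html.parser')

time_elements = s.find_all('time')
# s.find_all('time')[0].get('datetime') would raise IndexError here;
# the conditional expression degrades to None instead.
timestamp = time_elements[0].get('datetime') if len(time_elements) else None
assert timestamp is None
```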

View file

@@ -7,6 +7,7 @@ import gspread
from loguru import logger
from dotenv import load_dotenv
from selenium import webdriver
+import traceback

import archivers
from storages import S3Storage, S3Config
@@ -60,7 +61,7 @@ def expand_url(url):
    return url

-def process_sheet(sheet, header=1):
+def process_sheet(sheet, header=1, columns=GWorksheet.COLUMN_NAMES):
    gc = gspread.service_account(filename='service_account.json')
    sh = gc.open(sheet)
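For reference, `gspread.service_account` authenticates with a service-account JSON key and `open` looks the spreadsheet up by title; a minimal sketch, where the key filename and sheet title are assumptions for illustration:

```python
# Minimal gspread sketch; 'service_account.json' and the sheet title
# are assumed, and the sheet must be shared with the service account.
import gspread

gc = gspread.service_account(filename='service_account.json')
sh = gc.open('My Archive Sheet')  # look up the document by title

for ii, wks in enumerate(sh.worksheets()):
    print(ii, wks.title)
```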
@@ -78,17 +79,17 @@ def process_sheet(sheet, header=1):
    # loop through worksheets to check
    for ii, wks in enumerate(sh.worksheets()):
-        logger.info(f'Opening worksheet {ii}: "{wks.title}"')
-        gw = GWorksheet(wks, header_row=header)
+        logger.info(f'Opening worksheet {ii}: "{wks.title}" header={header}')
+        gw = GWorksheet(wks, header_row=header, columns=columns)

        if not gw.col_exists('url'):
            logger.warning(
-                f'No "Media URL" column found, skipping worksheet {wks.title}')
+                f'No "{columns["url"]}" column found, skipping worksheet {wks.title}')
            continue

        if not gw.col_exists('status'):
            logger.warning(
-                f'No "Archive status" column found, skipping worksheet {wks.title}')
+                f'No "{columns["status"]}" column found, skipping worksheet {wks.title}')
            continue

        # archives will be in a folder 'doc_name/worksheet_name'
@@ -104,62 +105,60 @@ def process_sheet(sheet, header=1):
            archivers.WaybackArchiver(s3_client, driver)
        ]

-        values = gw.get_values()
        # loop through rows in worksheet
        for row in range(1 + header, gw.count_rows() + 1):
-            row_values = values[row-1]
-            url = gw.get_cell(row_values, 'url')
-            status = gw.get_cell(row_values, 'status')
+            url = gw.get_cell(row, 'url')
+            original_status = gw.get_cell(row, 'status')
+            status = gw.get_cell(row, 'status', fresh=original_status in ['', None])

            if url != '' and status in ['', None]:
-                url = gw.get_cell(row, 'url')
-                status = gw.get_cell(status, 'status')
-
-                if url != '' and status in ['', None]:
-                    gw.set_cell(row, 'status', 'Archive in progress')
+                gw.set_cell(row, 'status', 'Archive in progress')

-                    url = expand_url(url)
+                url = expand_url(url)

-                    for archiver in active_archivers:
-                        logger.debug(f'Trying {archiver} on row {row}')
+                for archiver in active_archivers:
+                    logger.debug(f'Trying {archiver} on row {row}')

-                        try:
-                            result = archiver.download(url, check_if_exists=True)
-                        except Exception as e:
-                            result = False
-                            logger.error(
-                                f'Got unexpected error in row {row} with archiver {archiver} for url {url}: {e}')
+                    try:
+                        result = archiver.download(url, check_if_exists=True)
+                    except Exception as e:
+                        result = False
+                        logger.error(f'Got unexpected error in row {row} with archiver {archiver} for url {url}: {e}\n{traceback.format_exc()}')

-                        if result:
-                            if result.status in ['success', 'already archived']:
-                                result.status = archiver.name + \
-                                    ": " + str(result.status)
-                                logger.success(
-                                    f'{archiver} succeeded on row {row}')
-                                break
-
-                            logger.warning(
-                                f'{archiver} did not succeed on row {row}, final status: {result.status}')
-                            result.status = archiver.name + \
-                                ": " + str(result.status)
+                    if result:
+                        if result.status in ['success', 'already archived']:
+                            result.status = archiver.name + \
+                                ": " + str(result.status)
+                            logger.success(
+                                f'{archiver} succeeded on row {row}')
+                            break

-                    if result:
-                        update_sheet(gw, row, result)
-                    else:
-                        gw.set_cell(row, 'status', 'failed: no archiver')
+                        logger.warning(
+                            f'{archiver} did not succeed on row {row}, final status: {result.status}')
+                        result.status = archiver.name + \
+                            ": " + str(result.status)
+
+                if result:
+                    update_sheet(gw, row, result)
+                else:
+                    gw.set_cell(row, 'status', 'failed: no archiver')

        logger.success(f'Finshed worksheet {wks.title}')

    driver.quit()
def main():
    parser = argparse.ArgumentParser(
        description='Automatically archive social media videos from a Google Sheets document')
-    parser.add_argument('--sheet', action='store', dest='sheet')
-    parser.add_argument('--header', action='store', dest='header', default=1, type=int)
-    args = parser.parse_args()
+    parser.add_argument('--sheet', action='store', dest='sheet', help='the name of the google sheets document', required=True)
+    parser.add_argument('--header', action='store', dest='header', default=1, type=int, help='1-based index for the header row')
+    for k, v in GWorksheet.COLUMN_NAMES.items():
+        parser.add_argument(f'--col-{k}', action='store', dest=k, default=v, help=f'the name of the column to fill with {k} (defaults={v})')
+    args = parser.parse_args()
+    config_columns = {k: getattr(args, k).lower() for k in GWorksheet.COLUMN_NAMES.keys()}

-    logger.info(f'Opening document {args.sheet}')
+    logger.info(f'Opening document {args.sheet} for header {args.header}')

    mkdir_if_not_exists('tmp')
-    process_sheet(args.sheet, header=args.header)
+    process_sheet(args.sheet, header=args.header, columns=config_columns)
    shutil.rmtree('tmp')
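The `--col-*` options are generated from `GWorksheet.COLUMN_NAMES`, so each mapped column gets an override flag without spelling every argument out. A self-contained sketch of the pattern, with a stand-in dict:

```python
# Sketch of generating argparse flags from a dict, mirroring the
# --col-* loop above; this COLUMN_NAMES is a stand-in example.
import argparse

COLUMN_NAMES = {'url': 'link', 'status': 'archive status'}

parser = argparse.ArgumentParser()
for k, v in COLUMN_NAMES.items():
    # dest=k keeps the parsed attribute name equal to the dict key,
    # which is what lets getattr(args, k) recover each value later
    parser.add_argument(f'--col-{k}', action='store', dest=k, default=v)

args = parser.parse_args(['--col-url', 'Media URL'])
columns = {k: getattr(args, k).lower() for k in COLUMN_NAMES}
print(columns)  # {'url': 'media url', 'status': 'archive status'}
```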

View file

@@ -45,5 +45,5 @@ class S3Storage(Storage):
        return False

    def uploadf(self, file, key, **kwargs):
-        extra_args = kwargs["extra_args"] if "extra_args" in kwargs else {'ACL': 'public-read'}
+        extra_args = kwargs.get("extra_args", {'ACL': 'public-read'})
        self.s3.upload_fileobj(file, Bucket=self.bucket, Key=self._get_path(key), ExtraArgs=extra_args)
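`dict.get` with a default collapses the membership test and the fallback into one call; a quick sketch of the equivalence:

```python
# Both forms yield the same extra_args when the key is absent.
kwargs = {}  # no 'extra_args' supplied by the caller

old_style = kwargs["extra_args"] if "extra_args" in kwargs else {'ACL': 'public-read'}
new_style = kwargs.get("extra_args", {'ACL': 'public-read'})

assert old_style == new_style == {'ACL': 'public-read'}
```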

View file

@@ -2,6 +2,12 @@ from gspread import utils

class GWorksheet:
    """
    This class makes read/write operations to a worksheet easier.
+    It can read the headers from a custom row number, but row references
+    should always include the offset of the header row,
+    eg: if header=4, row 5 will be the first with data.
    """

    COLUMN_NAMES = {
        'url': 'link',
        'archive': 'archive location',
@@ -18,7 +24,8 @@ class GWorksheet:
    def __init__(self, worksheet, columns=COLUMN_NAMES, header_row=1):
        self.wks = worksheet
-        self.headers = [v.lower() for v in self.wks.row_values(header_row)]
+        self.values = self.wks.get_values()
+        self.headers = [v.lower() for v in self.values[header_row - 1]]
        self.columns = columns

    def _check_col_exists(self, col: str):
@@ -34,25 +41,29 @@ class GWorksheet:
        return self.columns[col] in self.headers

    def count_rows(self):
-        return len(self.wks.get_values())
+        return len(self.values)

    def get_row(self, row: int):
        # row is 1-based
-        return self.wks.row_values(row)
+        return self.values[row - 1]

    def get_values(self):
-        return self.wks.get_values()
+        return self.values

-    def get_cell(self, row, col: str):
+    def get_cell(self, row, col: str, fresh=False):
        """
        returns the cell value from (row, col),
        where row can be an index (1-based) OR list of values
        as received from self.get_row(row)
+        if fresh=True, the sheet is queried again for this cell
        """
+        col_index = self._col_index(col)
+
+        if fresh:
+            return self.wks.cell(row, col_index + 1).value
        if type(row) == int:
            row = self.get_row(row)

-        col_index = self._col_index(col)
        if col_index >= len(row):
            return ''
        return row[col_index]
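With `self.values` cached at construction, reads are served from the single `get_values()` call made in `__init__`, and `fresh=True` opts back into one live API read for the cell being checked. A usage sketch, assuming `wks` is an authenticated gspread worksheet whose header sits in row 1:

```python
# Usage sketch of the cached-vs-fresh read paths; `wks` is assumed to be
# an authenticated gspread worksheet with its header in row 1.
gw = GWorksheet(wks, header_row=1)

cached = gw.get_cell(2, 'url')            # served from self.values, no API call
live = gw.get_cell(2, 'url', fresh=True)  # one wks.cell(...) round trip

# If another client edited the sheet after construction, only the
# fresh read reflects it.
print(cached, live)
```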