# auto-archiver/auto_archive.py
# import os
import datetime
# import argparse
import requests
import shutil
# import gspread
from loguru import logger
from dotenv import load_dotenv
# from selenium import webdriver
import traceback

# import archivers
from archivers import TelethonArchiver, TelegramArchiver, TiktokArchiver, YoutubeDLArchiver, TwitterArchiver, WaybackArchiver, ArchiveResult
from storages import S3Storage
from utils import GWorksheet, mkdir_if_not_exists
from configs import Config

load_dotenv()
def update_sheet(gw, row, result: "ArchiveResult"):
    """Write the outcome of archiving one URL back into its spreadsheet row.

    Only columns that exist in the worksheet AND are currently empty are
    filled, so values entered manually are never overwritten; the status
    column is always (re)written. All writes are queued and sent to the
    API in a single batch call.

    :param gw: GWorksheet wrapper around the target worksheet.
    :param row: 1-indexed row number to update.
    :param result: ArchiveResult produced by an archiver.
    """
    cell_updates = []
    row_values = gw.get_row(row)

    def batch_if_valid(col, val, final_value=None):
        # Queue a write only when there is a value, the column exists,
        # and the cell is still empty (don't clobber user input).
        final_value = final_value or val
        if val and gw.col_exists(col) and gw.get_cell(row_values, col) == '':
            cell_updates.append((row, col, final_value))

    # status is always updated, even when already filled
    cell_updates.append((row, 'status', result.status))

    batch_if_valid('archive', result.cdn_url)
    # val=True forces the date write whenever the column exists and is empty
    batch_if_valid('date', True, datetime.datetime.now(datetime.timezone.utc).isoformat())
    batch_if_valid('thumbnail', result.thumbnail,
                   f'=IMAGE("{result.thumbnail}")')
    batch_if_valid('thumbnail_index', result.thumbnail_index)
    batch_if_valid('title', result.title)
    batch_if_valid('duration', result.duration, str(result.duration))
    batch_if_valid('screenshot', result.screenshot)
    batch_if_valid('hash', result.hash)

    if result.timestamp is not None:
        if isinstance(result.timestamp, int):
            # BUG FIX: treat the unix timestamp as UTC directly.
            # fromtimestamp() without a tz converts to *local* time, and the
            # previous .replace(tzinfo=utc) mislabelled that local time as UTC.
            timestamp_string = datetime.datetime.fromtimestamp(
                result.timestamp, tz=datetime.timezone.utc).isoformat()
        elif isinstance(result.timestamp, str):
            timestamp_string = result.timestamp
        else:
            # assumed to be a datetime-like object — TODO confirm with callers
            timestamp_string = result.timestamp.isoformat()
        batch_if_valid('timestamp', timestamp_string)

    gw.batch_set_cell(cell_updates)
def expand_url(url):
    """Resolve a shortened t.co link to its final destination.

    Twitter wraps URLs with its t.co shortener; archivers need the real
    target URL. Non-t.co URLs are returned unchanged. On network failure
    the original (short) URL is returned so archiving can still proceed.

    :param url: URL taken from the spreadsheet.
    :return: the expanded URL, or the input unchanged.
    """
    if 'https://t.co/' in url:
        try:
            # follow redirects; timeout so one dead link cannot hang the run
            r = requests.get(url, timeout=30)
            url = r.url
        except requests.RequestException:
            # narrowed from a bare except: only swallow network-level errors
            logger.error(f'Failed to expand url {url}')
    return url
def process_sheet(c: "Config", sheet, header=1, columns=GWorksheet.COLUMN_NAMES):
    """Archive every unprocessed row of every worksheet in a spreadsheet.

    Worksheets missing a url or status column are skipped. In valid
    worksheets, each row that has a URL and an empty status is run through
    the archivers in order; the first archiver to succeed wins and the row
    is updated with its result via update_sheet().

    :param c: parsed Config (gsheets client, storage, webdriver, columns).
    :param sheet: name of the Google Sheets document to open.
    :param header: first data row is header+1 (kept for compatibility).
    :param columns: kept for backward compatibility; c.column_names is used.
    """
    sh = c.gsheets_client.open(sheet)

    # loop through worksheets to check
    for ii, wks in enumerate(sh.worksheets()):
        logger.info(f'Opening worksheet {ii}: "{wks.title}" header={c.header}')
        gw = GWorksheet(wks, header_row=c.header, columns=c.column_names)

        if not gw.col_exists('url'):
            logger.warning(
                f'No "{c.column_names["url"]}" column found, skipping worksheet {wks.title}')
            continue

        if not gw.col_exists('status'):
            logger.warning(
                f'No "{c.column_names["status"]}" column found, skipping worksheet {wks.title}')
            continue

        # archives will be in a folder 'doc_name/worksheet_name'
        c.set_folder(f'{sheet.replace(" ", "_")}/{wks.title.replace(" ", "_")}/')
        storage = c.get_storage()

        # order matters: the first archiver to succeed excludes the remaining
        active_archivers = [
            TelethonArchiver(storage, c.webdriver, c.telegram_config),
            TelegramArchiver(storage, c.webdriver),
            TiktokArchiver(storage, c.webdriver),
            YoutubeDLArchiver(storage, c.webdriver),
            TwitterArchiver(storage, c.webdriver),
            WaybackArchiver(storage, c.webdriver)
        ]

        # loop through rows in worksheet
        for row in range(1 + header, gw.count_rows() + 1):
            url = gw.get_cell(row, 'url')
            original_status = gw.get_cell(row, 'status')
            # re-fetch status fresh only when the cached value looks
            # unprocessed, to avoid double-archiving a row another run
            # may have just claimed
            status = gw.get_cell(row, 'status', fresh=original_status in ['', None] and url != '')

            if url != '' and status in ['', None]:
                # claim the row before starting the (slow) download
                gw.set_cell(row, 'status', 'Archive in progress')

                url = expand_url(url)

                for archiver in active_archivers:
                    logger.debug(f'Trying {archiver} on row {row}')

                    try:
                        result = archiver.download(url, check_if_exists=True)
                    except Exception as e:
                        # one failing archiver must not abort the row;
                        # fall through to the next one
                        result = False
                        logger.error(f'Got unexpected error in row {row} with archiver {archiver} for url {url}: {e}\n{traceback.format_exc()}')

                    if result:
                        if result.status in ['success', 'already archived']:
                            result.status = f'{archiver.name}: {result.status}'
                            logger.success(f'{archiver} succeeded on row {row}')
                            break
                        # prefix the archiver name so the sheet shows which
                        # archiver produced the (non-success) status
                        logger.warning(
                            f'{archiver} did not succeed on row {row}, final status: {result.status}')
                        result.status = f'{archiver.name}: {result.status}'

                if result:
                    update_sheet(gw, row, result)
                else:
                    gw.set_cell(row, 'status', 'failed: no archiver')

        # typo fix: was "Finshed"
        logger.success(f'Finished worksheet {wks.title}')
def main():
    """Entry point: parse config, archive the configured sheet, clean up."""
    c = Config()
    c.parse()

    logger.info(f'Opening document {c.sheet} for header {c.header}')

    mkdir_if_not_exists(c.tmp_folder)
    try:
        process_sheet(c, c.sheet, header=c.header, columns=c.column_names)
    finally:
        # always remove temp files and release the browser, even when
        # process_sheet raises — previously an error leaked both
        shutil.rmtree(c.tmp_folder)
        c.webdriver.quit()


if __name__ == '__main__':
    main()