V0.9.0 - closes several open issues: new enrichers and bug fixes (#133)

* clean orchestrator code, add archiver cleanup logic

* improves documentation for database.py

* telethon archivers isolate sessions into copied files

* closes #127

* closes #125

* closes #84

* meta enricher applies to all media

* closes #61 adds subtitles and comments

* minor update

* minor fixes to yt-dlp subtitles and comments

* closes #17 but the logic is imperfect.

* closes #85 ssl enricher

* minifies html, JS refactor for preview of certificates

* closes #91 adds freetsa timestamp authority

* version bump

* simplify download_url method

* skip ssl if nothing archived

* html preview improvements

* adds retrying lib

* manual download archiver improvements

* meta only runs when relevant data is available

* new metadata convenience method

* html template improvements

* removes debug message

* does not close #91 yet, will need some more certificate chain logging

* adds verbosity config

* new instagram api archiver

* adds proxy support

* adds proxy/end_means_success support and bug fix for yt-dlp

* proxy support for webdriver

* adds socks proxy to wacz_enricher

* refactor recursion in inner media and display

* infinite recursive display

* foolproofing timestamping authorities

* version to 0.9.0

* minor fixes from code-review
pull/137/head v0.9.0
Miguel Sozinho Ramalho 2024-02-20 18:05:29 +00:00 committed by GitHub
parent 5c49124ac6
commit 7a21ae96af
No known key found in the database for this signature
GPG key ID: B5690EEEBB952194
34 changed files with 1696 additions and 880 deletions

View file

@ -8,9 +8,6 @@ name: Docker
on:
release:
types: [published]
push:
# branches: [ "main" ]
tags: [ "v*.*.*" ]
env:
# Use docker.io for Docker Hub if empty

View file

@ -11,9 +11,6 @@ name: Pypi
on:
release:
types: [published]
push:
# branches: [ "main" ]
tags: [ "v*.*.*" ]
permissions:
contents: read

View file

@ -36,6 +36,11 @@ requests = {extras = ["socks"], version = "*"}
numpy = "*"
warcio = "*"
jsonlines = "*"
pysubs2 = "*"
minify-html = "*"
retrying = "*"
tsp-client = "*"
certvalidator = "*"
[dev-packages]
autopep8 = "*"

1399
Pipfile.lock (generated)

Diff is too large to display

View file

@ -233,12 +233,12 @@ working with docker locally:
* to use local archive, also create a volume `-v` for it by adding `-v $PWD/local_archive:/app/local_archive`
release to docker hub
manual release to docker hub
* `docker image tag auto-archiver bellingcat/auto-archiver:latest`
* `docker push bellingcat/auto-archiver`
#### RELEASE
* update version in [version.py](src/auto_archiver/version.py)
* run `bash ./scripts/release.sh` and confirm
* package is automatically updated in pypi
* docker image is automatically pushed to dockerhub
* go to github releases > new release > use `vx.y.z` for matching version notation
* package is automatically updated in pypi
* docker image is automatically pushed to dockerhub

View file

@ -1,19 +0,0 @@
#!/bin/bash
set -e
TAG=$(python -c 'from src.auto_archiver.version import __version__; print("v" + __version__)')
read -p "Creating new release for $TAG. Do you want to continue? [Y/n] " prompt
if [[ $prompt == "y" || $prompt == "Y" || $prompt == "yes" || $prompt == "Yes" ]]; then
# git add -A
# git commit -m "Bump version to $TAG for release" || true && git push
echo "Creating new git tag $TAG"
git tag "$TAG" -m "$TAG"
git push --tags
else
echo "Cancelled"
exit 1
fi

View file

@ -7,4 +7,5 @@ from .instagram_tbot_archiver import InstagramTbotArchiver
from .tiktok_archiver import TiktokArchiver
from .telegram_archiver import TelegramArchiver
from .vk_archiver import VkArchiver
from .youtubedl_archiver import YoutubeDLArchiver
from .youtubedl_archiver import YoutubeDLArchiver
from .instagram_api_archiver import InstagramAPIArchiver

View file

@ -3,6 +3,8 @@ from abc import abstractmethod
from dataclasses import dataclass
import os
import mimetypes, requests
from loguru import logger
from retrying import retry
from ..core import Metadata, Step, ArchivingContext
@ -23,6 +25,10 @@ class Archiver(Step):
# used when archivers need to login or do other one-time setup
pass
def cleanup(self) -> None:
# called when archivers are done, or upon errors, cleanup any resources
pass
def sanitize_url(self, url: str) -> str:
# used to clean unnecessary URL parameters OR unfurl redirect links
return url
@ -37,16 +43,17 @@ class Archiver(Step):
return mime.split("/")[0]
return ""
def download_from_url(self, url: str, to_filename: str = None, item: Metadata = None) -> str:
@retry(wait_random_min=500, wait_random_max=3500, stop_max_attempt_number=5)
def download_from_url(self, url: str, to_filename: str = None, verbose=True) -> str:
"""
downloads a URL to provided filename, or inferred from URL, returns local filename, if item is present will use its tmp_dir
downloads a URL to provided filename, or inferred from URL, returns local filename
"""
if not to_filename:
to_filename = url.split('/')[-1].split('?')[0]
if len(to_filename) > 64:
to_filename = to_filename[-64:]
if item:
to_filename = os.path.join(ArchivingContext.get_tmp_dir(), to_filename)
to_filename = os.path.join(ArchivingContext.get_tmp_dir(), to_filename)
if verbose: logger.debug(f"downloading {url[0:50]=} {to_filename=}")
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
}
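The @retry decorator above comes from the newly added retrying library; the wait values are in milliseconds. A minimal, self-contained sketch of that behaviour (the flaky function and its values are illustrative, not part of the project):

from retrying import retry

attempts = {"count": 0}

# retried up to 5 times, sleeping a random 0.5-3.5s between attempts;
# if every attempt fails, the last exception is re-raised to the caller
@retry(wait_random_min=500, wait_random_max=3500, stop_max_attempt_number=5)
def flaky_download() -> str:
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise ConnectionError("transient network error")
    return "ok"

print(flaky_download(), "after", attempts["count"], "attempts")  # ok after 3 attempts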

View file

@ -0,0 +1,272 @@
import re, requests
from datetime import datetime
from loguru import logger
from retrying import retry
from tqdm import tqdm
from . import Archiver
from ..core import Metadata
from ..core import Media
class InstagramAPIArchiver(Archiver):
"""
Uses an https://github.com/subzeroid/instagrapi API deployment to fetch instagram posts data
# TODO: improvement collect aggregates of locations[0].location and mentions for all posts
"""
name = "instagram_api_archiver"
global_pattern = re.compile(r"(?:(?:http|https):\/\/)?(?:www.)?(?:instagram.com)\/(stories(?:\/highlights)?|p|reel)?\/?([^\/\?]*)\/?(\d+)?")
def __init__(self, config: dict) -> None:
super().__init__(config)
self.assert_valid_string("access_token")
self.assert_valid_string("api_endpoint")
if self.api_endpoint[-1] == "/": self.api_endpoint = self.api_endpoint[:-1]
self.full_profile = bool(self.full_profile)
self.minimize_json_output = bool(self.minimize_json_output)
@staticmethod
def configs() -> dict:
return {
"access_token": {"default": None, "help": "a valid instagrapi-api token"},
"api_endpoint": {"default": None, "help": "API endpoint to use"},
"full_profile": {"default": False, "help": "if true, will download all posts, tagged posts, stories, and highlights for a profile, if false, will only download the profile pic and information."},
"minimize_json_output": {"default": True, "help": "if true, will remove empty values from the json output"},
}
def download(self, item: Metadata) -> Metadata:
url = item.get_url()
url = url.replace("instagr.com", "instagram.com").replace("instagr.am", "instagram.com")
insta_matches = self.global_pattern.findall(url)
logger.info(f"{insta_matches=}")
if not len(insta_matches) or len(insta_matches[0])!=3: return
if len(insta_matches) > 1:
logger.warning(f"Multiple instagram matches found in {url=}, using the first one")
return
g1, g2, g3 = insta_matches[0][0], insta_matches[0][1], insta_matches[0][2]
if g1 == "": return self.download_profile(item, g2)
elif g1 == "p": return self.download_post(item, g2, context="post")
elif g1 == "reel": return self.download_post(item, g2, context="reel")
elif g1 == "stories/highlights": return self.download_highlights(item, g2)
elif g1 == "stories":
if len(g3): return self.download_post(item, id=g3, context="story")
return self.download_stories(item, g2)
else:
logger.warning(f"Unknown instagram regex group match {g1=} found in {url=}")
return
@retry(wait_random_min=1000, wait_random_max=3000, stop_max_attempt_number=5)
def call_api(self, path: str, params: dict) -> dict:
headers = {
"accept": "application/json",
"x-access-key": self.access_token
}
logger.debug(f"calling {self.api_endpoint}/{path} with {params=}")
return requests.get(f"{self.api_endpoint}/{path}", headers=headers, params=params).json()
def cleanup_dict(self, d: dict | list) -> dict:
# recursively removes empty values and irrelevant layout keys to minimize the json output
if not self.minimize_json_output: return d
if type(d) == list: return [self.cleanup_dict(v) for v in d]
if type(d) != dict: return d
return {
k: self.cleanup_dict(v) if type(v) in [dict, list] else v
for k, v in d.items()
if v not in [0.0, 0, [], {}, "", None, "null"] and
k not in ["x", "y", "width", "height"]
}
def download_profile(self, result: Metadata, username: str) -> Metadata:
# download basic profile info
url = result.get_url()
user = self.call_api("v2/user/by/username", {"username": username}).get("user")
assert user, f"User {username} not found"
user = self.cleanup_dict(user)
result.set_title(user.get("full_name", username)).set("data", user)
if pic_url := user.get("profile_pic_url_hd", user.get("profile_pic_url")):
filename = self.download_from_url(pic_url)
result.add_media(Media(filename=filename), id=f"profile_picture")
if self.full_profile:
user_id = user.get("pk")
# download all posts
self.download_all_posts(result, user_id)
# download all stories
try:
stories = self._download_stories_reusable(result, username)
result.set("#stories", len(stories))
except Exception as e:
result.append("errors", f"Error downloading stories for {username}")
logger.error(f"Error downloading stories for {username}: {e}")
# download all highlights
try:
count_highlights = 0
highlights = self.call_api(f"v1/user/highlights", {"user_id": user_id})
for h in highlights:
try:
h_info = self._download_highlights_reusable(result, h.get("pk"))
count_highlights += len(h_info.get("items", []))
except Exception as e:
result.append("errors", f"Error downloading highlight id{h.get('pk')} for {username}")
logger.error(f"Error downloading highlight id{h.get('pk')} for {username}: {e}")
result.set("#highlights", count_highlights)
except Exception as e:
result.append("errors", f"Error downloading highlights for {username}")
logger.error(f"Error downloading highlights for {username}: {e}")
result.set_url(url) # reset as scrape_item modifies it
return result.success("insta profile")
def download_post(self, result: Metadata, code: str = None, id: str = None, context: str = None) -> Metadata:
if id:
post = self.call_api(f"v1/media/by/id", {"id": id})
else:
post = self.call_api(f"v1/media/by/code", {"code": code})
assert post, f"Post {id or code} not found"
if caption_text := post.get("caption_text"):
result.set_title(caption_text)
post = self.scrape_item(result, post, context)
if post.get("taken_at"): result.set_timestamp(post.get("taken_at"))
return result.success(f"insta {context or 'post'}")
def download_highlights(self, result: Metadata, id: str) -> Metadata:
h_info = self._download_highlights_reusable(result, id)
items = len(h_info.get("items", []))
del h_info["items"]
result.set_title(h_info.get("title")).set("data", h_info).set("#reels", items)
return result.success("insta highlights")
def _download_highlights_reusable(self, result: Metadata, id: str) -> dict:
full_h = self.call_api(f"v2/highlight/by/id", {"id": id})
h_info = full_h.get("response", {}).get("reels", {}).get(f"highlight:{id}")
assert h_info, f"Highlight {id} not found: {full_h=}"
if cover_media := h_info.get("cover_media", {}).get("cropped_image_version", {}).get("url"):
filename = self.download_from_url(cover_media)
result.add_media(Media(filename=filename), id=f"cover_media highlight {id}")
items = h_info.get("items", [])[::-1] # newest to oldest
for h in tqdm(items, desc="downloading highlights", unit="highlight"):
try: self.scrape_item(result, h, "highlight")
except Exception as e:
result.append("errors", f"Error downloading highlight {h.get('id')}")
logger.error(f"Error downloading highlight, skipping {h.get('id')}: {e}")
return h_info
def download_stories(self, result: Metadata, username: str) -> Metadata:
now = datetime.now().strftime("%Y-%m-%d_%H-%M")
stories = self._download_stories_reusable(result, username)
result.set_title(f"stories {username} at {now}").set("#stories", len(stories))
return result.success(f"insta stories {now}")
def _download_stories_reusable(self, result: Metadata, username: str) -> list[dict]:
stories = self.call_api(f"v1/user/stories/by/username", {"username": username})
assert stories, f"Stories for {username} not found"
stories = stories[::-1] # newest to oldest
for s in tqdm(stories, desc="downloading stories", unit="story"):
try: self.scrape_item(result, s, "story")
except Exception as e:
result.append("errors", f"Error downloading story {s.get('id')}")
logger.error(f"Error downloading story, skipping {s.get('id')}: {e}")
return stories
def download_all_posts(self, result: Metadata, user_id: str):
end_cursor = None
pbar = tqdm(desc="downloading posts")
post_count = 0
while end_cursor != "":
posts = self.call_api(f"v1/user/medias/chunk", {"user_id": user_id, "end_cursor": end_cursor})
if not len(posts): break
posts, end_cursor = posts[0], posts[1]
logger.info(f"parsing {len(posts)} posts, next {end_cursor=}")
for p in posts:
try: self.scrape_item(result, p, "post")
except Exception as e:
result.append("errors", f"Error downloading post {p.get('id')}")
logger.error(f"Error downloading post, skipping {p.get('id')}: {e}")
pbar.update(1)
post_count+=1
result.set("#posts", post_count)
### reusable parsing utils below
def scrape_item(self, result:Metadata, item:dict, context:str=None) -> dict:
"""
receives a Metadata and an API dict response
fetches the media and adds it to the Metadata
cleans and returns the API dict
context can be used to give specific id prefixes to media
"""
if "clips_metadata" in item:
if reusable_text := item.get("clips_metadata", {}).get("reusable_text_attribute_string"):
item["clips_metadata_text"] = reusable_text
if self.minimize_json_output:
del item["clips_metadata"]
if code := item.get("code"):
result.set("url", f"https://www.instagram.com/p/{code}/")
resources = item.get("resources", [])
item, media, media_id = self.scrape_media(item, context)
# if resources are present take the main media from the first resource
if not media and len(resources):
_, media, media_id = self.scrape_media(resources[0], context)
resources = resources[1:]
assert media, f"Image/video not found in {item=}"
# posts with multiple items contain a resources list
resources_metadata = Metadata()
for r in resources:
self.scrape_item(resources_metadata, r)
if not resources_metadata.is_empty():
media.set("other media", resources_metadata.media)
result.add_media(media, id=media_id)
return item
def scrape_media(self, item: dict, context:str) -> tuple[dict, Media, str]:
# remove unnecessary info
if self.minimize_json_output:
for k in ["image_versions", "video_versions", "video_dash_manifest"]:
if k in item: del item[k]
item = self.cleanup_dict(item)
image_media = None
if image_url := item.get("thumbnail_url"):
filename = self.download_from_url(image_url, verbose=False)
image_media = Media(filename=filename)
# retrieve video info
best_id = item.get('id', item.get('pk'))
taken_at = item.get("taken_at")
code = item.get("code")
if video_url := item.get("video_url"):
filename = self.download_from_url(video_url, verbose=False)
video_media = Media(filename=filename)
if taken_at: video_media.set("date", taken_at)
if code: video_media.set("url", f"https://www.instagram.com/p/{code}")
video_media.set("preview", [image_media])
video_media.set("data", [item])
return item, video_media, f"{context or 'video'} {best_id}"
elif image_media:
if taken_at: image_media.set("date", taken_at)
if code: image_media.set("url", f"https://www.instagram.com/p/{code}")
image_media.set("data", [item])
return item, image_media, f"{context or 'image'} {best_id}"
return item, None, None
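For reference, a small sketch of what global_pattern captures for the URL shapes handled in download() above (the URLs below are illustrative):

import re

global_pattern = re.compile(r"(?:(?:http|https):\/\/)?(?:www.)?(?:instagram.com)\/(stories(?:\/highlights)?|p|reel)?\/?([^\/\?]*)\/?(\d+)?")

examples = [
    "https://www.instagram.com/bellingcat/",              # ('', 'bellingcat', '')            -> profile
    "https://www.instagram.com/p/Cxxxxxxxxxx/",           # ('p', 'Cxxxxxxxxxx', '')          -> post
    "https://www.instagram.com/reel/Cxxxxxxxxxx/",        # ('reel', 'Cxxxxxxxxxx', '')       -> reel
    "https://www.instagram.com/stories/bellingcat/123/",  # ('stories', 'bellingcat', '123')  -> single story
    "https://www.instagram.com/stories/highlights/123/",  # ('stories/highlights', '123', '') -> highlights
]
for url in examples:
    print(global_pattern.findall(url)[0])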

View file

@ -1,10 +1,12 @@
import shutil
from telethon.sync import TelegramClient
from loguru import logger
import time, os
from sqlite3 import OperationalError
from . import Archiver
from ..core import Metadata, Media, ArchivingContext
from ..utils import random_str
class InstagramTbotArchiver(Archiver):
@ -20,10 +22,6 @@ class InstagramTbotArchiver(Archiver):
self.assert_valid_string("api_id")
self.assert_valid_string("api_hash")
self.timeout = int(self.timeout)
try:
self.client = TelegramClient(self.session_file, self.api_id, self.api_hash)
except OperationalError as e:
logger.error(f"Unable to access the {self.session_file} session, please make sure you don't use the same session file here and in telethon_archiver. if you do then disable at least one of the archivers for the 1st time you setup telethon session: {e}")
@staticmethod
def configs() -> dict:
@ -35,10 +33,30 @@ class InstagramTbotArchiver(Archiver):
}
def setup(self) -> None:
"""
1. makes a copy of session_file that is removed in cleanup
2. checks if the session file is valid
"""
logger.info(f"SETUP {self.name} checking login...")
# make a copy of the session that is used exclusively with this archiver instance
new_session_file = os.path.join("secrets/", f"instabot-{time.strftime('%Y-%m-%d')}{random_str(8)}.session")
shutil.copy(self.session_file + ".session", new_session_file)
self.session_file = new_session_file
try:
self.client = TelegramClient(self.session_file, self.api_id, self.api_hash)
except OperationalError as e:
logger.error(f"Unable to access the {self.session_file} session, please make sure you don't use the same session file here and in telethon_archiver. if you do then disable at least one of the archivers for the 1st time you setup telethon session: {e}")
with self.client.start():
logger.success(f"SETUP {self.name} login works.")
def cleanup(self) -> None:
logger.info(f"CLEANUP {self.name}.")
if os.path.exists(self.session_file):
os.remove(self.session_file)
def download(self, item: Metadata) -> Metadata:
url = item.get_url()
if not "instagram.com" in url: return False

View file

@ -53,10 +53,10 @@ class TelegramArchiver(Archiver):
if not len(image_urls): return False
for img_url in image_urls:
result.add_media(Media(self.download_from_url(img_url, item=item)))
result.add_media(Media(self.download_from_url(img_url)))
else:
video_url = video.get('src')
m_video = Media(self.download_from_url(video_url, item=item))
m_video = Media(self.download_from_url(video_url))
# extract duration from HTML
try:
duration = s.find_all('time')[0].contents[0]

View file

@ -1,4 +1,5 @@
import shutil
from telethon.sync import TelegramClient
from telethon.errors import ChannelInvalidError
from telethon.tl.functions.messages import ImportChatInviteRequest
@ -9,6 +10,7 @@ import re, time, json, os
from . import Archiver
from ..core import Metadata, Media, ArchivingContext
from ..utils import random_str
class TelethonArchiver(Archiver):
@ -21,8 +23,6 @@ class TelethonArchiver(Archiver):
self.assert_valid_string("api_id")
self.assert_valid_string("api_hash")
self.client = TelegramClient(self.session_file, self.api_id, self.api_hash)
@staticmethod
def configs() -> dict:
return {
@ -40,10 +40,20 @@ class TelethonArchiver(Archiver):
def setup(self) -> None:
"""
1. trigger login process for telegram or proceed if already saved in a session file
2. joins channel_invites where needed
1. makes a copy of session_file that is removed in cleanup
2. trigger login process for telegram or proceed if already saved in a session file
3. joins channel_invites where needed
"""
logger.info(f"SETUP {self.name} checking login...")
# make a copy of the session that is used exclusively with this archiver instance
new_session_file = os.path.join("secrets/", f"telethon-{time.strftime('%Y-%m-%d')}{random_str(8)}.session")
shutil.copy(self.session_file + ".session", new_session_file)
self.session_file = new_session_file
# initiate the client
self.client = TelegramClient(self.session_file, self.api_id, self.api_hash)
with self.client.start():
logger.success(f"SETUP {self.name} login works.")
@ -89,6 +99,11 @@ class TelethonArchiver(Archiver):
i += 1
pbar.update()
def cleanup(self) -> None:
logger.info(f"CLEANUP {self.name}.")
if os.path.exists(self.session_file):
os.remove(self.session_file)
def download(self, item: Metadata) -> Metadata:
"""
if this url is archivable will download post info and look for other posts from the same group with media.
@ -137,7 +152,7 @@ class TelethonArchiver(Archiver):
if len(other_media_urls):
logger.debug(f"Got {len(other_media_urls)} other media urls from {mp.id=}: {other_media_urls}")
for i, om_url in enumerate(other_media_urls):
filename = self.download_from_url(om_url, f'{chat}_{group_id}_{i}', item)
filename = self.download_from_url(om_url, f'{chat}_{group_id}_{i}')
result.add_media(Media(filename=filename), id=f"{group_id}_{i}")
filename_dest = os.path.join(tmp_dir, f'{chat}_{group_id}', str(mp.id))

View file

@ -90,7 +90,7 @@ class TwitterApiArchiver(TwitterArchiver, Archiver):
continue
logger.info(f"Found media {media}")
ext = mimetypes.guess_extension(mimetype)
media.filename = self.download_from_url(media.get("src"), f'{slugify(url)}_{i}{ext}', item)
media.filename = self.download_from_url(media.get("src"), f'{slugify(url)}_{i}{ext}')
result.add_media(media)
result.set_content(json.dumps({

View file

@ -80,7 +80,7 @@ class TwitterArchiver(Archiver):
logger.warning(f"Could not get media URL of {tweet_media}")
continue
ext = mimetypes.guess_extension(mimetype)
media.filename = self.download_from_url(media.get("src"), f'{slugify(url)}_{i}{ext}', item)
media.filename = self.download_from_url(media.get("src"), f'{slugify(url)}_{i}{ext}')
result.add_media(media)
return result.success("twitter-snscrape")
@ -120,7 +120,7 @@ class TwitterArchiver(Archiver):
if (mtype := mimetypes.guess_type(UrlUtil.remove_get_parameters(u))[0]):
ext = mimetypes.guess_extension(mtype)
media.filename = self.download_from_url(u, f'{slugify(url)}_{i}{ext}', item)
media.filename = self.download_from_url(u, f'{slugify(url)}_{i}{ext}')
result.add_media(media)
result.set_title(tweet.get("text")).set_content(json.dumps(tweet, ensure_ascii=False)).set_timestamp(datetime.strptime(tweet["created_at"], "%Y-%m-%dT%H:%M:%S.%fZ"))

View file

@ -1,4 +1,4 @@
import datetime, os, yt_dlp
import datetime, os, yt_dlp, pysubs2
from loguru import logger
from . import Archiver
@ -10,38 +10,51 @@ class YoutubeDLArchiver(Archiver):
def __init__(self, config: dict) -> None:
super().__init__(config)
self.subtitles = bool(self.subtitles)
self.comments = bool(self.comments)
self.livestreams = bool(self.livestreams)
self.live_from_start = bool(self.live_from_start)
self.end_means_success = bool(self.end_means_success)
@staticmethod
def configs() -> dict:
return {
"facebook_cookie": {"default": None, "help": "optional facebook cookie to have more access to content, from browser, looks like 'cookie: datr= xxxx'"},
"subtitles": {"default": True, "help": "download subtitles if available"},
"comments": {"default": False, "help": "download all comments if available, may lead to large metadata"},
"livestreams": {"default": False, "help": "if set, will download live streams, otherwise will skip them; see --max-filesize for more control"},
"live_from_start": {"default": False, "help": "if set, will download live streams from their earliest available moment, otherwise starts now."},
"proxy": {"default": "", "help": "http/socks (https seems to not work atm) proxy to use for the webdriver, eg https://proxy-user:password@proxy-ip:port"},
"end_means_success": {"default": True, "help": "if True, any archived content will mean a 'success', if False this archiver will not return a 'success' stage; this is useful for cases when the yt-dlp will archive a video but ignore other types of content like images or text only pages that the subsequent archivers can retrieve."},
}
def download(self, item: Metadata) -> Metadata:
#TODO: yt-dlp for transcripts?
url = item.get_url()
if item.netloc in ['facebook.com', 'www.facebook.com'] and self.facebook_cookie:
logger.debug('Using Facebook cookie')
yt_dlp.utils.std_headers['cookie'] = self.facebook_cookie
ydl = yt_dlp.YoutubeDL({'outtmpl': os.path.join(ArchivingContext.get_tmp_dir(), f'%(id)s.%(ext)s'), 'quiet': False, 'noplaylist': True})
ydl_options = {'outtmpl': os.path.join(ArchivingContext.get_tmp_dir(), f'%(id)s.%(ext)s'), 'quiet': False, 'noplaylist': True, 'writesubtitles': self.subtitles, 'writeautomaticsub': self.subtitles, "live_from_start": self.live_from_start, "proxy": self.proxy}
ydl = yt_dlp.YoutubeDL(ydl_options) # allsubtitles and subtitleslangs not working as expected, so default lang is always "en"
try:
# don't download since it can be a live stream
info = ydl.extract_info(url, download=False)
if info.get('is_live', False):
logger.warning("Live streaming media, not archiving now")
if info.get('is_live', False) and not self.livestreams:
logger.warning("Livestream detected, skipping due to 'livestreams' configuration setting")
return False
except yt_dlp.utils.DownloadError as e:
logger.debug(f'No video - Youtube normal control flow: {e}')
return False
except Exception as e:
logger.debug(f'ytdlp exception which is normal for example a facebook page with images only will cause a IndexError: list index out of range. Exception here is: \n {e}')
logger.debug(f'ytdlp exception which is normal for example a facebook page with images only will cause a IndexError: list index out of range. Exception is: \n {e}')
return False
# this time download
ydl = yt_dlp.YoutubeDL({**ydl_options, "getcomments": self.comments})
info = ydl.extract_info(url, download=True)
if "entries" in info:
entries = info.get("entries", [])
if not len(entries):
@ -52,10 +65,32 @@ class YoutubeDLArchiver(Archiver):
result = Metadata()
result.set_title(info.get("title"))
for entry in entries:
filename = ydl.prepare_filename(entry)
if not os.path.exists(filename):
filename = filename.split('.')[0] + '.mkv'
result.add_media(Media(filename).set("duration", info.get("duration")))
try:
filename = ydl.prepare_filename(entry)
if not os.path.exists(filename):
filename = filename.split('.')[0] + '.mkv'
new_media = Media(filename).set("duration", info.get("duration"))
# read text from subtitles if enabled
if self.subtitles:
for lang, val in (info.get('requested_subtitles') or {}).items():
try:
subs = pysubs2.load(val.get('filepath'), encoding="utf-8")
text = " ".join([line.text for line in subs])
new_media.set(f"subtitles_{lang}", text)
except Exception as e:
logger.error(f"Error loading subtitle file {val.get('filepath')}: {e}")
result.add_media(new_media)
except Exception as e:
logger.error(f"Error processing entry {entry}: {e}")
# extract comments if enabled
if self.comments:
result.set("comments", [{
"text": c["text"],
"author": c["author"],
"timestamp": datetime.datetime.utcfromtimestamp(c.get("timestamp")).replace(tzinfo=datetime.timezone.utc)
} for c in info.get("comments", [])])
if (timestamp := info.get("timestamp")):
timestamp = datetime.datetime.utcfromtimestamp(timestamp).replace(tzinfo=datetime.timezone.utc).isoformat()
@ -64,4 +99,6 @@ class YoutubeDLArchiver(Archiver):
upload_date = datetime.datetime.strptime(upload_date, '%Y%m%d').replace(tzinfo=datetime.timezone.utc)
result.set("upload_date", upload_date)
return result.success("yt-dlp")
if self.end_means_success: result.success("yt-dlp")
else: result.status = "yt-dlp"
return result

View file

@ -1,6 +1,3 @@
from loguru import logger
class ArchivingContext:
"""
Singleton context class.

View file

@ -44,10 +44,14 @@ class Media:
"""
if include_self: yield self
for prop in self.properties.values():
if isinstance(prop, Media): yield prop
if isinstance(prop, Media):
for inner_media in prop.all_inner_media(include_self=True):
yield inner_media
if isinstance(prop, list):
for prop_media in prop:
if isinstance(prop_media, Media): yield prop_media
if isinstance(prop_media, Media):
for inner_media in prop_media.all_inner_media(include_self=True):
yield inner_media
def is_stored(self) -> bool:
return len(self.urls) > 0 and len(self.urls) == len(ArchivingContext.get("storages"))

View file

@ -54,6 +54,12 @@ class Metadata:
self.metadata[key] = val
return self
def append(self, key: str, val: Any) -> Metadata:
if key not in self.metadata:
self.metadata[key] = []
self.metadata[key].append(val)
return self
def get(self, key: str, default: Any = None, create_if_missing=False) -> Union[Metadata, str]:
# goes through metadata and returns the Metadata available
if create_if_missing and key not in self.metadata:
@ -69,7 +75,8 @@ class Metadata:
return "success" in self.status
def is_empty(self) -> bool:
return not self.is_success() and len(self.media) == 0 and len(self.metadata) <= 2 # url, processed_at
meaningful_ids = set(self.metadata.keys()) - set(["_processed_at", "url", "total_bytes", "total_size", "archive_duration_seconds"])
return not self.is_success() and len(self.media) == 0 and len(meaningful_ids) == 0
@property # getter .netloc
def netloc(self) -> str:

View file

@ -25,13 +25,28 @@ class ArchivingOrchestrator:
self.storages: List[Storage] = config.storages
ArchivingContext.set("storages", self.storages, keep_on_reset=True)
for a in self.archivers: a.setup()
try:
for a in self.archivers: a.setup()
except (KeyboardInterrupt, Exception) as e:
logger.error(f"Error during setup of archivers: {e}\n{traceback.format_exc()}")
self.cleanup()
def cleanup(self) -> None:
logger.info("Cleaning up")
for a in self.archivers: a.cleanup()
def feed(self) -> Generator[Metadata]:
for item in self.feeder:
yield self.feed_item(item)
self.cleanup()
def feed_item(self, item: Metadata) -> Metadata:
"""
Takes one item (URL) to archive and calls self.archive, additionally:
- catches keyboard interruptions to do a clean exit
- catches any unexpected error, logs it, and does a clean exit
"""
try:
ArchivingContext.reset()
with tempfile.TemporaryDirectory(dir="./") as tmp_dir:
@ -41,36 +56,34 @@ class ArchivingOrchestrator:
# catches keyboard interruptions to do a clean exit
logger.warning(f"caught interrupt on {item=}")
for d in self.databases: d.aborted(item)
self.cleanup()
exit()
except Exception as e:
logger.error(f'Got unexpected error on item {item}: {e}\n{traceback.format_exc()}')
for d in self.databases: d.failed(item)
# how does this handle the parameters like folder which can be different for each archiver?
# the storage needs to know where to archive!!
# solution: feeders have context: extra metadata that they can read or ignore,
# all of it should have sensible defaults (eg: folder)
# default feeder is a list with 1 element
def archive(self, result: Metadata) -> Union[Metadata, None]:
"""
Runs the archiving process for a single URL
1. Each archiver can sanitize its own URLs
2. Check for cached results in Databases, and signal start to the databases
3. Call Archivers until one succeeds
4. Call Enrichers
5. Store all downloaded/generated media
6. Call selected Formatter and store formatted if needed
"""
original_url = result.get_url()
# 1 - cleanup
# each archiver is responsible for cleaning/expanding its own URLs
# 1 - sanitize - each archiver is responsible for cleaning/expanding its own URLs
url = original_url
for a in self.archivers: url = a.sanitize_url(url)
result.set_url(url)
if original_url != url: result.set("original_url", original_url)
# 2 - notify start to DB
# signal to DB that archiving has started
# and propagate already archived if it exists
# 2 - notify start to DBs, propagate already archived if feature enabled in DBs
cached_result = None
for d in self.databases:
# are the databases to decide whether to archive?
# they can simply return True by default, otherwise they can avoid duplicates. should this logic be more granular, for example on the archiver level: a tweet will not need be scraped twice, whereas an instagram profile might. the archiver could not decide from the link which parts to archive,
# instagram profile example: it would always re-archive everything
# maybe the database/storage could use a hash/key to decide if there's a need to re-archive
d.started(result)
if (local_result := d.fetch(result)):
cached_result = (cached_result or Metadata()).merge(local_result)
@ -84,30 +97,21 @@ class ArchivingOrchestrator:
for a in self.archivers:
logger.info(f"Trying archiver {a.name} for {url}")
try:
# Q: should this be refactored so it's just a.download(result)?
result.merge(a.download(result))
if result.is_success(): break
except Exception as e: logger.error(f"Unexpected error with archiver {a.name}: {e}: {traceback.format_exc()}")
except Exception as e:
logger.error(f"ERROR archiver {a.name}: {e}: {traceback.format_exc()}")
# what if an archiver returns multiple entries and one is to be part of HTMLgenerator?
# should it call the HTMLgenerator as if it's not an enrichment?
# eg: if it is enable: generates an HTML with all the returned media, should it include enrichers? yes
# then how to execute it last? should there also be post-processors? are there other examples?
# maybe as a PDF? or a Markdown file
# 4 - call enrichers: have access to archived content, can generate metadata and Media
# eg: screenshot, wacz, webarchive, thumbnails
# 4 - call enrichers to work with archived content
for e in self.enrichers:
try: e.enrich(result)
except Exception as exc: logger.error(f"Unexpected error with enricher {e.name}: {exc}: {traceback.format_exc()}")
except Exception as exc:
logger.error(f"ERROR enricher {e.name}: {exc}: {traceback.format_exc()}")
# 5 - store media
# looks for Media in result.media and also result.media[x].properties (as list or dict values)
# 5 - store all downloaded/generated media
result.store()
# 6 - format and store formatted if needed
# enrichers typically need access to already stored URLs etc
if (final_media := self.formatter.format(result)):
final_media.store(url=url)
result.set_final_media(final_media)
@ -115,7 +119,7 @@ class ArchivingOrchestrator:
if result.is_empty():
result.status = "nothing archived"
# signal completion to databases (DBs, Google Sheets, CSV, ...)
# signal completion to databases and archivers
for d in self.databases: d.done(result)
return result

View file

@ -32,7 +32,7 @@ class Database(Step, ABC):
# @abstractmethod
def fetch(self, item: Metadata) -> Union[Metadata, bool]:
"""check if the given item has been archived already"""
"""check and fetch if the given item has been archived already, each database should handle its own caching, and configuration mechanisms"""
return False
@abstractmethod

View file

@ -7,4 +7,6 @@ from .wacz_enricher import WaczArchiverEnricher
from .whisper_enricher import WhisperEnricher
from .pdq_hash_enricher import PdqHashEnricher
from .metadata_enricher import MetadataEnricher
from .meta_enricher import MetaEnricher
from .meta_enricher import MetaEnricher
from .ssl_enricher import SSLEnricher
from .timestamping_enricher import TimestampingEnricher

View file

@ -19,22 +19,26 @@ class MetaEnricher(Enricher):
@staticmethod
def configs() -> dict:
return {
}
return {}
def enrich(self, to_enrich: Metadata) -> None:
logger.debug(f"calculating archive metadata information for url={to_enrich.get_url()}")
url = to_enrich.get_url()
if to_enrich.is_empty():
logger.debug(f"[SKIP] META_ENRICHER there is no media or metadata to enrich: {url=}")
return
logger.debug(f"calculating archive metadata information for {url=}")
self.enrich_file_sizes(to_enrich)
self.enrich_archive_duration(to_enrich)
def enrich_file_sizes(self, to_enrich):
def enrich_file_sizes(self, to_enrich: Metadata):
logger.debug(f"calculating archive file sizes for url={to_enrich.get_url()} ({len(to_enrich.media)} media files)")
total_size = 0
for i, m in enumerate(to_enrich.media):
file_stats = os.stat(m.filename)
to_enrich.media[i].set("bytes", file_stats.st_size)
to_enrich.media[i].set("size", self.human_readable_bytes(file_stats.st_size))
for media in to_enrich.get_all_media():
file_stats = os.stat(media.filename)
media.set("bytes", file_stats.st_size)
media.set("size", self.human_readable_bytes(file_stats.st_size))
total_size += file_stats.st_size
to_enrich.set("total_bytes", total_size)

View file

@ -16,7 +16,8 @@ class ScreenshotEnricher(Enricher):
"width": {"default": 1280, "help": "width of the screenshots"},
"height": {"default": 720, "help": "height of the screenshots"},
"timeout": {"default": 60, "help": "timeout for taking the screenshot"},
"sleep_before_screenshot": {"default": 4, "help": "seconds to wait for the pages to load before taking screenshot"}
"sleep_before_screenshot": {"default": 4, "help": "seconds to wait for the pages to load before taking screenshot"},
"http_proxy": {"default": "", "help": "http proxy to use for the webdriver, eg http://proxy-user:password@proxy-ip:port"},
}
def enrich(self, to_enrich: Metadata) -> None:
@ -26,7 +27,7 @@ class ScreenshotEnricher(Enricher):
return
logger.debug(f"Enriching screenshot for {url=}")
with Webdriver(self.width, self.height, self.timeout, 'facebook.com' in url) as driver:
with Webdriver(self.width, self.height, self.timeout, 'facebook.com' in url, http_proxy=self.http_proxy) as driver:
try:
driver.get(url)
time.sleep(int(self.sleep_before_screenshot))

View file

@ -0,0 +1,36 @@
import ssl, os
from slugify import slugify
from urllib.parse import urlparse
from loguru import logger
from . import Enricher
from ..core import Metadata, ArchivingContext, Media
class SSLEnricher(Enricher):
"""
Retrieves SSL certificate information for a domain, as a file
"""
name = "ssl_enricher"
def __init__(self, config: dict) -> None:
super().__init__(config)
self.skip_when_nothing_archived = bool(self.skip_when_nothing_archived)
@staticmethod
def configs() -> dict:
return {
"skip_when_nothing_archived": {"default": True, "help": "if true, will skip enriching when no media is archived"},
}
def enrich(self, to_enrich: Metadata) -> None:
if not to_enrich.media and self.skip_when_nothing_archived: return
url = to_enrich.get_url()
domain = urlparse(url).netloc
logger.debug(f"fetching SSL certificate for {domain=} in {url=}")
cert = ssl.get_server_certificate((domain, 443))
cert_fn = os.path.join(ArchivingContext.get_tmp_dir(), f"{slugify(domain)}.pem")
with open(cert_fn, "w") as f: f.write(cert)
to_enrich.add_media(Media(filename=cert_fn), id="ssl_certificate")

View file

@ -15,32 +15,54 @@ class ThumbnailEnricher(Enricher):
def __init__(self, config: dict) -> None:
# without this STEP.__init__ is not called
super().__init__(config)
self.thumbnails_per_second = int(self.thumbnails_per_minute) / 60
self.max_thumbnails = int(self.max_thumbnails)
@staticmethod
def configs() -> dict:
return {}
return {
"thumbnails_per_minute": {"default": 60, "help": "how many thumbnails to generate per minute of video, can be limited by max_thumbnails"},
"max_thumbnails": {"default": 16, "help": "limit the number of thumbnails to generate per video, 0 means no limit"},
}
def enrich(self, to_enrich: Metadata) -> None:
logger.debug(f"generating thumbnails")
for i, m in enumerate(to_enrich.media[::]):
"""
Uses or reads the video duration to generate thumbnails
Calculates how many thumbnails to generate and at which timestamps based on the video duration, the number of thumbnails per minute and the max number of thumbnails.
Thumbnails are equally distributed across the video duration.
"""
logger.debug(f"generating thumbnails for {to_enrich.get_url()}")
for m_id, m in enumerate(to_enrich.media[::]):
if m.is_video():
folder = os.path.join(ArchivingContext.get_tmp_dir(), random_str(24))
os.makedirs(folder, exist_ok=True)
logger.debug(f"generating thumbnails for {m.filename}")
fps, duration = 0.5, m.get("duration")
if duration is not None:
duration = float(duration)
if duration < 60: fps = 10.0 / duration
elif duration < 120: fps = 20.0 / duration
else: fps = 40.0 / duration
duration = m.get("duration")
stream = ffmpeg.input(m.filename)
stream = ffmpeg.filter(stream, 'fps', fps=fps).filter('scale', 512, -1)
stream.output(os.path.join(folder, 'out%d.jpg')).run()
if duration is None:
try:
probe = ffmpeg.probe(m.filename)
duration = float(next(stream for stream in probe['streams'] if stream['codec_type'] == 'video')['duration'])
to_enrich.media[m_id].set("duration", duration)
except Exception as e:
logger.error(f"error getting duration of video {m.filename}: {e}")
return
num_thumbs = int(min(max(1, duration * self.thumbnails_per_second), self.max_thumbnails))
timestamps = [duration / (num_thumbs + 1) * i for i in range(1, num_thumbs + 1)]
thumbnails = os.listdir(folder)
thumbnails_media = []
for t, fname in enumerate(thumbnails):
if fname[-3:] == 'jpg':
thumbnails_media.append(Media(filename=os.path.join(folder, fname)).set("id", f"thumbnail_{t}"))
to_enrich.media[i].set("thumbnails", thumbnails_media)
for index, timestamp in enumerate(timestamps):
output_path = os.path.join(folder, f"out{index}.jpg")
ffmpeg.input(m.filename, ss=timestamp).filter('scale', 512, -1).output(output_path, vframes=1, loglevel="quiet").run()
try:
thumbnails_media.append(Media(
filename=output_path)
.set("id", f"thumbnail_{index}")
.set("timestamp", "%.3fs" % timestamp)
)
except Exception as e:
logger.error(f"error creating thumbnail {index} for media: {e}")
to_enrich.media[m_id].set("thumbnails", thumbnails_media)

View file

@ -0,0 +1,136 @@
import os
from loguru import logger
from tsp_client import TSPSigner, SigningSettings, TSPVerifier
from tsp_client.algorithms import DigestAlgorithm
from importlib.metadata import version
from asn1crypto.cms import ContentInfo
from certvalidator import CertificateValidator, ValidationContext
from asn1crypto import pem
import certifi
from . import Enricher
from ..core import Metadata, ArchivingContext, Media
from ..archivers import Archiver
class TimestampingEnricher(Enricher):
"""
Uses several RFC3161 Time Stamp Authorities to generate a timestamp token that will be preserved. This can be used to prove that a certain file existed at a certain time, useful for legal purposes, for example, to prove that a certain file was not tampered with after a certain date.
The information that gets timestamped is the newline-separated concatenation of the file hashes existing in the current archive. It will depend on which archivers and enrichers ran before this one. Inner media files (like thumbnails) are not included in the .txt file. It should run AFTER the hash_enricher.
See https://gist.github.com/Manouchehri/fd754e402d98430243455713efada710 for a list of timestamp authorities.
"""
name = "timestamping_enricher"
def __init__(self, config: dict) -> None:
super().__init__(config)
@staticmethod
def configs() -> dict:
return {
"tsa_urls": {
"default": [
# [Adobe Approved Trust List] and [Windows Cert Store]
"http://timestamp.digicert.com",
"http://timestamp.identrust.com",
# "https://timestamp.entrust.net/TSS/RFC3161sha2TS", # not valid for timestamping
# "https://timestamp.sectigo.com", # wait 15 seconds between each request.
# [Adobe: European Union Trusted Lists].
# "https://timestamp.sectigo.com/qualified", # wait 15 seconds between each request.
# [Windows Cert Store]
"http://timestamp.globalsign.com/tsa/r6advanced1",
# [Adobe: European Union Trusted Lists] and [Windows Cert Store]
# "http://ts.quovadisglobal.com/eu", # not valid for timestamping
# "http://tsa.belgium.be/connect", # self-signed certificate in certificate chain
# "https://timestamp.aped.gov.gr/qtss", # self-signed certificate in certificate chain
# "http://tsa.sep.bg", # self-signed certificate in certificate chain
# "http://tsa.izenpe.com", #unable to get local issuer certificate
# "http://kstamp.keynectis.com/KSign", # unable to get local issuer certificate
"http://tss.accv.es:8318/tsa",
],
"help": "List of RFC3161 Time Stamp Authorities to use, separate with commas if passed via the command line.",
"cli_set": lambda cli_val, cur_val: set(cli_val.split(","))
}
}
def enrich(self, to_enrich: Metadata) -> None:
url = to_enrich.get_url()
logger.debug(f"RFC3161 timestamping existing files for {url=}")
# create a new text file with the existing media hashes
hashes = [m.get("hash").replace("SHA-256:", "").replace("SHA3-512:", "") for m in to_enrich.media if m.get("hash")]
if not len(hashes):
logger.warning(f"No hashes found in {url=}")
return
tmp_dir = ArchivingContext.get_tmp_dir()
hashes_fn = os.path.join(tmp_dir, "hashes.txt")
data_to_sign = "\n".join(hashes)
with open(hashes_fn, "w") as f:
f.write(data_to_sign)
hashes_media = Media(filename=hashes_fn)
timestamp_tokens = []
from slugify import slugify
for tsa_url in self.tsa_urls:
try:
signing_settings = SigningSettings(tsp_server=tsa_url, digest_algorithm=DigestAlgorithm.SHA256)
signer = TSPSigner()
message = bytes(data_to_sign, encoding='utf8')
# send TSQ and get TSR from the TSA server
signed = signer.sign(message=message, signing_settings=signing_settings)
# fail if there's any issue with the certificates, uses certifi list of trusted CAs
TSPVerifier(certifi.where()).verify(signed, message=message)
# download and verify timestamping certificate
cert_chain = self.download_and_verify_certificate(signed)
# continue with saving the timestamp token
tst_fn = os.path.join(tmp_dir, f"timestamp_token_{slugify(tsa_url)}")
with open(tst_fn, "wb") as f: f.write(signed)
timestamp_tokens.append(Media(filename=tst_fn).set("tsa", tsa_url).set("cert_chain", cert_chain))
except Exception as e:
logger.warning(f"Error while timestamping {url=} with {tsa_url=}: {e}")
if len(timestamp_tokens):
hashes_media.set("timestamp_authority_files", timestamp_tokens)
hashes_media.set("certifi v", version("certifi"))
hashes_media.set("tsp_client v", version("tsp_client"))
hashes_media.set("certvalidator v", version("certvalidator"))
to_enrich.add_media(hashes_media, id="timestamped_hashes")
to_enrich.set("timestamped", True)
logger.success(f"{len(timestamp_tokens)} timestamp tokens created for {url=}")
else:
logger.warning(f"No successful timestamps for {url=}")
def download_and_verify_certificate(self, signed: bytes) -> list[Media]:
# validates the timestamp token's certificate chain against trusted CA roots and returns it as a list of Media files
tst = ContentInfo.load(signed)
trust_roots = []
with open(certifi.where(), 'rb') as f:
for _, _, der_bytes in pem.unarmor(f.read(), multiple=True):
trust_roots.append(der_bytes)
context = ValidationContext(trust_roots=trust_roots)
certificates = tst["content"]["certificates"]
first_cert = certificates[0].dump()
intermediate_certs = []
for i in range(1, len(certificates)): # cannot use list comprehension [1:]
intermediate_certs.append(certificates[i].dump())
validator = CertificateValidator(first_cert, intermediate_certs=intermediate_certs, validation_context=context)
path = validator.validate_usage({'digital_signature'}, extended_key_usage={'time_stamping'})
cert_chain = []
for cert in path:
cert_fn = os.path.join(ArchivingContext.get_tmp_dir(), f"{str(cert.serial_number)[:20]}.crt")
with open(cert_fn, "wb") as f:
f.write(cert.dump())
cert_chain.append(Media(filename=cert_fn).set("subject", cert.subject.native["common_name"]))
return cert_chain
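A minimal re-verification sketch for the preserved artifacts, using the same tsp_client calls as above (the file names are illustrative, and the hashes file must contain exactly the bytes that were signed):

import certifi
from tsp_client import TSPVerifier

with open("hashes.txt", "rb") as f:
    message = f.read()
with open("timestamp_token_http-timestamp-digicert-com", "rb") as f:
    signed = f.read()

# verify() raises if the token does not match the message or the TSA signature is invalid
TSPVerifier(certifi.where()).verify(signed, message=message)
print("timestamp token verified against hashes.txt")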

View file

@ -31,7 +31,9 @@ class WaczArchiverEnricher(Enricher, Archiver):
"docker_commands": {"default": None, "help":"if a custom docker invocation is needed"},
"timeout": {"default": 120, "help": "timeout for WACZ generation in seconds"},
"extract_media": {"default": False, "help": "If enabled all the images/videos/audio present in the WACZ archive will be extracted into separate Media and appear in the html report. The .wacz file will be kept untouched."},
"extract_screenshot": {"default": True, "help": "If enabled the screenshot captured by browsertrix will be extracted into separate Media and appear in the html report. The .wacz file will be kept untouched."}
"extract_screenshot": {"default": True, "help": "If enabled the screenshot captured by browsertrix will be extracted into separate Media and appear in the html report. The .wacz file will be kept untouched."},
"socks_proxy_host": {"default": None, "help": "SOCKS proxy host for browsertrix-crawler, use in combination with socks_proxy_port. eg: user:password@host"},
"socks_proxy_port": {"default": None, "help": "SOCKS proxy port for browsertrix-crawler, use in combination with socks_proxy_host. eg 1234"},
}
def download(self, item: Metadata) -> Metadata:
@ -91,7 +93,12 @@ class WaczArchiverEnricher(Enricher, Archiver):
try:
logger.info(f"Running browsertrix-crawler: {' '.join(cmd)}")
subprocess.run(cmd, check=True)
if self.socks_proxy_host and self.socks_proxy_port:
logger.debug("Using SOCKS proxy for browsertrix-crawler")
my_env = os.environ.copy()
my_env["SOCKS_HOST"] = self.socks_proxy_host
my_env["SOCKS_PORT"] = str(self.socks_proxy_port)
subprocess.run(cmd, check=True, env=my_env)
except Exception as e:
logger.error(f"WACZ generation failed: {e}")
return False
@ -191,7 +198,7 @@ class WaczArchiverEnricher(Enricher, Archiver):
# if a link with better quality exists, try to download that
if record_url_best_qual != record_url:
try:
m.filename = self.download_from_url(record_url_best_qual, warc_fn, to_enrich)
m.filename = self.download_from_url(record_url_best_qual, warc_fn)
m.set("src", record_url_best_qual)
m.set("src_alternative", record_url)
except Exception as e: logger.warning(f"Unable to download best quality URL for {record_url=} got error {e}, using original in WARC.")

View file

@ -1,7 +1,6 @@
from loguru import logger
import time, requests
from . import Enricher
from ..archivers import Archiver
from ..utils import UrlUtil
@ -9,7 +8,9 @@ from ..core import Metadata
class WaybackArchiverEnricher(Enricher, Archiver):
"""
Submits the current URL to the webarchive and returns a job_id or completed archive
Submits the current URL to the webarchive and returns a job_id or completed archive.
The Wayback Machine will rate-limit heavy usage per IP.
"""
name = "wayback_archiver_enricher"
@ -25,7 +26,9 @@ class WaybackArchiverEnricher(Enricher, Archiver):
"timeout": {"default": 15, "help": "seconds to wait for successful archive confirmation from wayback, if more than this passes the result contains the job_id so the status can later be checked manually."},
"if_not_archived_within": {"default": None, "help": "only tell wayback to archive if no archive is available before the number of seconds specified, use None to ignore this option. For more information: https://docs.google.com/document/d/1Nsv52MvSjbLb2PCpHlat0gkzw0EvtSgpKHu4mk0MnrA"},
"key": {"default": None, "help": "wayback API key. to get credentials visit https://archive.org/account/s3.php"},
"secret": {"default": None, "help": "wayback API secret. to get credentials visit https://archive.org/account/s3.php"}
"secret": {"default": None, "help": "wayback API secret. to get credentials visit https://archive.org/account/s3.php"},
"proxy_http": {"default": None, "help": "http proxy to use for wayback requests, eg http://proxy-user:password@proxy-ip:port"},
"proxy_https": {"default": None, "help": "https proxy to use for wayback requests, eg https://proxy-user:password@proxy-ip:port"},
}
def download(self, item: Metadata) -> Metadata:
@ -36,6 +39,10 @@ class WaybackArchiverEnricher(Enricher, Archiver):
return result.success("wayback")
def enrich(self, to_enrich: Metadata) -> bool:
proxies = {}
if self.proxy_http: proxies["http"] = self.proxy_http
if self.proxy_https: proxies["https"] = self.proxy_https
url = to_enrich.get_url()
if UrlUtil.is_auth_wall(url):
logger.debug(f"[SKIP] WAYBACK since url is behind AUTH WALL: {url=}")
@ -55,7 +62,7 @@ class WaybackArchiverEnricher(Enricher, Archiver):
if self.if_not_archived_within:
post_data["if_not_archived_within"] = self.if_not_archived_within
# see https://docs.google.com/document/d/1Nsv52MvSjbLb2PCpHlat0gkzw0EvtSgpKHu4mk0MnrA for more options
r = requests.post('https://web.archive.org/save/', headers=ia_headers, data=post_data)
r = requests.post('https://web.archive.org/save/', headers=ia_headers, data=post_data, proxies=proxies)
if r.status_code != 200:
logger.error(em := f"Internet archive failed with status of {r.status_code}: {r.json()}")
@ -75,14 +82,16 @@ class WaybackArchiverEnricher(Enricher, Archiver):
while not wayback_url and time.time() - start_time <= self.timeout:
try:
logger.debug(f"GETting status for {job_id=} on {url=} ({attempt=})")
r_status = requests.get(f'https://web.archive.org/save/status/{job_id}', headers=ia_headers)
r_status = requests.get(f'https://web.archive.org/save/status/{job_id}', headers=ia_headers, proxies=proxies)
r_json = r_status.json()
if r_status.status_code == 200 and r_json['status'] == 'success':
wayback_url = f"https://web.archive.org/web/{r_json['timestamp']}/{r_json['original_url']}"
elif r_status.status_code != 200 or r_json['status'] != 'pending':
logger.error(f"Wayback failed with {r_json}")
return False
except requests.exceptions.RequestException as e:
logger.warning(f"RequestException: fetching status for {url=} due to: {e}")
break
except Exception as e:
logger.warning(f"error fetching status for {url=} due to: {e}")
if not wayback_url:

View file

@ -10,7 +10,7 @@ from ..storages import S3Storage
class WhisperEnricher(Enricher):
"""
Connects with a Whisper API service to get texts out of audio
whisper API repository: TODO
whisper API repository: https://github.com/bellingcat/whisperbox-transcribe/
Only works if an S3 compatible storage is used
"""
name = "whisper_enricher"

View file

@ -4,6 +4,8 @@ import mimetypes, os, pathlib
from jinja2 import Environment, FileSystemLoader
from urllib.parse import quote
from loguru import logger
import minify_html, json
import base64
from ..version import __version__
from ..core import Metadata, Media, ArchivingContext
@ -45,6 +47,8 @@ class HtmlFormatter(Formatter):
metadata=item.metadata,
version=__version__
)
content = minify_html.minify(content, minify_js=False, minify_css=True)
html_path = os.path.join(ArchivingContext.get_tmp_dir(), f"formatted{random_str(24)}.html")
with open(html_path, mode="w", encoding="utf-8") as outf:
outf.write(content)
@ -89,3 +93,8 @@ class JinjaHelpers:
@staticmethod
def quote(s: str) -> str:
return quote(s)
@staticmethod
def json_dump_b64(d: dict) -> str:
j = json.dumps(d, indent=4, default=str)
return base64.b64encode(j.encode()).decode()
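A quick round-trip check of the helper above (the payload is illustrative): the base64 string placed into the copy-value64 attribute decodes back to the original UTF-8 JSON, which is what the copy logic in the template's JS still carries a TODO for.

import base64, json

def json_dump_b64(d: dict) -> str:
    j = json.dumps(d, indent=4, default=str)
    return base64.b64encode(j.encode()).decode()

payload = {"author": "auto-archiver", "note": "non-ASCII such as é survives"}
decoded = json.loads(base64.b64decode(json_dump_b64(payload)).decode("utf-8"))
assert decoded == payload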

View file

@ -96,6 +96,16 @@
overflow: hidden;
background-color: #f1f1f1;
}
.pem-certificate, .text-preview {
text-align: left;
font-size: small;
}
.text-preview{
padding-left: 10px;
padding-right: 10px;
white-space: pre-wrap;
}
</style>
</head>
@ -121,42 +131,7 @@
{% for m in media %}
<tr>
<td>
<ul>
<li><b>key:</b> <span class="copy">{{ m.key }}</span></li>
<li><b>type:</b> <span class="copy">{{ m.mimetype }}</span></li>
{% for prop in m.properties %}
{% if m.properties[prop] | is_list %}
<p></p>
<div>
<b class="collapsible" title="expand">{{ prop }}:</b>
<p></p>
<div class="collapsible-content">
{% for subprop in m.properties[prop] %}
{% if subprop | is_media %}
{{ macros.display_media(subprop, true, url) }}
<ul>
{% for subprop_prop in subprop.properties %}
<li><b>{{ subprop_prop }}:</b>
{{ macros.copy_urlize(subprop.properties[subprop_prop]) }}</li>
{% endfor %}
</ul>
{% else %}
{{ subprop }}
{% endif %}
{% endfor %}
</div>
</div>
<p></p>
{% elif m.properties[prop] | string | length > 1 %}
<li><b>{{ prop }}:</b> {{ macros.copy_urlize(m.properties[prop]) }}</li>
{% endif %}
{% endfor %}
</ul>
{{ macros.display_recursive(m, true) }}
</td>
<td>
{{ macros.display_media(m, true, url) }}
@ -175,16 +150,68 @@
<tr>
<td>{{ key }}</td>
<td>
{% if metadata[key] is mapping %}
<div class="center copy" copy-value64='{{metadata[key] | json_dump_b64}}'>Copy as JSON</div>
{% endif %}
{{ macros.copy_urlize(metadata[key]) }}
</td>
</tr>
{% endfor %}
</table>
<p style="text-align:center;">Made with <a
href="https://github.com/bellingcat/auto-archiver">bellingcat/auto-archiver</a> v{{ version }}</p>
<p class="center">Made with <a href="https://github.com/bellingcat/auto-archiver">bellingcat/auto-archiver</a>
v{{ version }}</p>
</body>
<script src="https://cdnjs.cloudflare.com/ajax/libs/forge/0.10.0/forge.min.js"></script>
<script defer>
// partial decode of SSL certificates
function decodeCertificate(sslCert) {
var cert = forge.pki.certificateFromPem(sslCert);
return `SSL CERTIFICATE PREVIEW:<br/><ul>
<li><b>Subject:</b> <span class="copy">${cert.subject.attributes.map(attr => `${attr.shortName}: ${attr.value}`).join(", ")}</span></li>
<li><b>Issuer:</b> <span class="copy">${cert.issuer.attributes.map(attr => `${attr.shortName}: ${attr.value}`).join(", ")}</span></li>
<li><b>Valid From:</b> <span class="copy">${cert.validity.notBefore}</span></li>
<li><b>Valid To:</b> <span class="copy">${cert.validity.notAfter}</span></li>
<li><b>Serial Number:</b> <span class="copy">${cert.serialNumber}</span></li>
</ul>`;
}
async function run() {
await PreviewCertificates();
await PreviewText();
await enableCopyLogic();
await enableCollapsibleLogic();
await setupSafeView();
}
async function PreviewCertificates() {
await Promise.all(
Array.from(document.querySelectorAll(".pem-certificate")).map(async el => {
let certificate = await (await fetch(el.getAttribute("pem"))).text();
el.innerHTML = decodeCertificate(certificate);
let cyberChefUrl =
`https://gchq.github.io/CyberChef/#recipe=Parse_X.509_certificate('PEM')&input=${btoa(certificate)}`;
// create a new anchor with this url and append after the code
let a = document.createElement("a");
a.href = cyberChefUrl;
a.textContent = "Full certificate details";
el.parentElement.appendChild(a);
})
);
console.log("certificate preview done");
}
async function PreviewText() {
await Promise.all(
Array.from(document.querySelectorAll(".text-preview")).map(async el => {
let textContent = await (await fetch(el.getAttribute("url"))).text();
el.textContent = textContent;
})
);
console.log("text preview done");
}
// notification logic
const notification = document.getElementById("notification");
@ -198,83 +225,99 @@
}
// copy logic
Array.from(document.querySelectorAll(".copy")).forEach(el => {
el.onclick = () => {
document.execCommand("copy");
}
el.addEventListener("copy", (e) => {
e.preventDefault();
if (e.clipboardData) {
if (el.hasAttribute("copy-value")) {
e.clipboardData.setData("text/plain", el.getAttribute("copy-value"));
} else {
e.clipboardData.setData("text/plain", el.textContent);
async function enableCopyLogic() {
await Promise.all(
Array.from(document.querySelectorAll(".copy")).map(el => {
el.onclick = () => {
document.execCommand("copy");
}
console.log(e.clipboardData.getData("text"))
showNotification("copied!")
}
})
})
el.addEventListener("copy", (e) => {
e.preventDefault();
if (e.clipboardData) {
if (el.hasAttribute("copy-value")) {
e.clipboardData.setData("text/plain", el.getAttribute("copy-value"));
} else if (el.hasAttribute("copy-value64")) {
// TODO: figure out how to decode unicode chars into utf-8
e.clipboardData.setData("text/plain", new String(atob(el.getAttribute(
"copy-value64"))));
} else {
e.clipboardData.setData("text/plain", el.textContent);
}
console.log(e.clipboardData.getData("text"))
showNotification("copied!")
}
})
})
)
console.log("copy logic enabled");
}
// collapsibles
let coll = document.getElementsByClassName("collapsible");
let i;
for (i = 0; i < coll.length; i++) {
coll[i].addEventListener("click", function () {
this.classList.toggle("active");
// let content = this.nextElementSibling;
let content = this.parentElement.querySelector(".collapsible-content");
if (content.style.display === "block") {
content.style.display = "none";
} else {
content.style.display = "block";
}
});
async function enableCollapsibleLogic() {
let coll = document.getElementsByClassName("collapsible");
for (let i = 0; i < coll.length; i++) {
await new Promise(resolve => {
coll[i].addEventListener("click", function () {
this.classList.toggle("active");
// let content = this.nextElementSibling;
let content = this.parentElement.querySelector(".collapsible-content");
if (content.style.display === "block") {
content.style.display = "none";
} else {
content.style.display = "block";
}
});
resolve();
})
}
console.log("collapsible logic enabled");
}
// logic for enabled/disabled greyscale
// Get references to the checkboxes and images/videos
const safeImageViewCheckbox = document.getElementById('safe-media-view');
const imagesVideos = document.querySelectorAll('img, video');
async function setupSafeView() {
// logic for enabled/disabled greyscale
// Get references to the checkboxes and images/videos
const safeImageViewCheckbox = document.getElementById('safe-media-view');
const imagesVideos = document.querySelectorAll('img, video');
// Function to toggle grayscale effect
function toggleGrayscale() {
// Function to toggle grayscale effect
function toggleGrayscale() {
imagesVideos.forEach(element => {
if (safeImageViewCheckbox.checked) {
// Enable grayscale effect
element.style.filter = 'grayscale(1)';
element.style.webkitFilter = 'grayscale(1)';
} else {
// Disable grayscale effect
element.style.filter = 'none';
element.style.webkitFilter = 'none';
}
});
}
// Add event listener to the checkbox to trigger the toggleGrayscale function
safeImageViewCheckbox.addEventListener('change', toggleGrayscale);
// Handle the hover effect using JavaScript
imagesVideos.forEach(element => {
if (safeImageViewCheckbox.checked) {
// Enable grayscale effect
element.style.filter = 'grayscale(1)';
element.style.webkitFilter = 'grayscale(1)';
} else {
// Disable grayscale effect
element.addEventListener('mouseenter', () => {
// Disable grayscale effect on hover
element.style.filter = 'none';
element.style.webkitFilter = 'none';
}
});
element.addEventListener('mouseleave', () => {
// Re-enable grayscale effect if checkbox is checked
if (safeImageViewCheckbox.checked) {
element.style.filter = 'grayscale(1)';
element.style.webkitFilter = 'grayscale(1)';
}
});
});
toggleGrayscale();
console.log("grayscale logic enabled");
}
// Add event listener to the checkbox to trigger the toggleGrayscale function
safeImageViewCheckbox.addEventListener('change', toggleGrayscale);
// Handle the hover effect using JavaScript
imagesVideos.forEach(element => {
element.addEventListener('mouseenter', () => {
// Disable grayscale effect on hover
element.style.filter = 'none';
element.style.webkitFilter = 'none';
});
element.addEventListener('mouseleave', () => {
// Re-enable grayscale effect if checkbox is checked
if (safeImageViewCheckbox.checked) {
element.style.filter = 'grayscale(1)';
element.style.webkitFilter = 'grayscale(1)';
}
});
});
// Call the function on page load to apply the initial state
toggleGrayscale();
run();
</script>
</html>


@ -18,6 +18,12 @@ No URL available for {{ m.key }}.
<a href="https://www.bing.com/images/search?view=detailv2&iss=sbi&form=SBIVSP&sbisrc=UrlPaste&q=imgurl:{{ url | quote }}">Bing</a>,&nbsp;
<a href="https://www.tineye.com/search/?url={{ url | quote }}">Tineye</a>
</div>
<div>
Image Forensics:&nbsp;
<a href="https://fotoforensics.com/?url={{ url | quote }}">FotoForensics</a>,&nbsp;
<a href="https://mever.iti.gr/forensics/?image={{ url }}">Media Verification Assistant</a>
</div>
<p></p>
</div>
{% elif 'video' in m.mimetype %}
@ -35,8 +41,15 @@ No URL available for {{ m.key }}.
</div>
{% elif m.filename | get_extension == ".wacz" %}
<a href="https://replayweb.page/?source={{ url | quote }}#view=pages&url={{ main_url }}">replayweb</a>
{% elif m.filename | get_extension == ".pem" %}
<code class="pem-certificate" pem="{{url}}"></code>
{% elif 'text' in m.mimetype %}
<div>PREVIEW:<br/><code><pre class="text-preview" url="{{url}}"></pre></code></div>
{% else %}
No preview available for {{ m.key }}.
No preview available for <code>{{ m.key }}</code>.
{% endif %}
{% else %}
{{ m.url | urlize }}
@ -54,7 +67,12 @@ No preview available for {{ m.key }}.
{% macro copy_urlize(val, href_text) -%}
{% if val is mapping %}
{% if val | is_list %}
{% for item in val %}
{{ copy_urlize(item) }}
{% endfor %}
{% elif val is mapping %}
<ul>
{% for key in val %}
<li>
@ -64,11 +82,66 @@ No preview available for {{ m.key }}.
</ul>
{% else %}
{% if href_text | length == 0 %}
{% if href_text | length == 0 %}
<span class="copy">{{ val | string | urlize }}</span>
{% else %}
<span class="copy" copy-value="{{val}}">{{ href_text | string | urlize }}</span>
{% endif %}
{% endif %}
{%- endmacro -%}
{% macro display_recursive(prop, skip_display) -%}
{% if prop is mapping %}
<div class="center copy" copy-value64='{{prop | json_dump_b64}}'>Copy as JSON</div>
<ul>
{% for subprop in prop %}
<li>
<b>{{ subprop }}:</b>
{{ display_recursive(prop[subprop]) }}
</li>
{% endfor %}
</ul>
{% elif prop | is_list %}
{% for item in prop %}
<li>
{{ display_recursive(item) }}
</li>
{% endfor %}
{% elif prop | is_media %}
{% if not skip_display %}
{{ display_media(prop, true) }}
{% endif %}
<ul>
<li><b>key:</b> <span class="copy">{{ prop.key }}</span></li>
<li><b>type:</b> <span class="copy">{{ prop.mimetype }}</span></li>
{% for subprop in prop.properties %}
{% if prop.properties[subprop] | is_list %}
<p></p>
<div>
<b class="collapsible" title="expand">{{ subprop }} ({{ prop.properties[subprop] | length }}):</b>
<p></p>
<div class="collapsible-content">
{% for subsubprop in prop.properties[subprop] %}
{{ display_recursive(subsubprop) }}
{% endfor %}
</div>
</div>
<p></p>
{% elif prop.properties[subprop] | string | length > 1 %}
<li><b>{{ subprop }}:</b> {{ copy_urlize(prop.properties[subprop]) }}</li>
{% endif %}
{% endfor %}
</ul>
{% else %}
{{ copy_urlize(prop) }}
{% endif %}
{%- endmacro -%}


@ -1,21 +1,24 @@
from __future__ import annotations
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.proxy import Proxy, ProxyType
from loguru import logger
from selenium.webdriver.common.by import By
import time
class Webdriver:
def __init__(self, width: int, height: int, timeout_seconds: int, facebook_accept_cookies: bool = False) -> webdriver:
def __init__(self, width: int, height: int, timeout_seconds: int, facebook_accept_cookies: bool = False, http_proxy: str = "") -> webdriver:
self.width = width
self.height = height
self.timeout_seconds = timeout_seconds
self.facebook_accept_cookies = facebook_accept_cookies
self.http_proxy = http_proxy
def __enter__(self) -> webdriver:
options = webdriver.FirefoxOptions()
options.add_argument("--headless")
options.add_argument(f'--proxy-server={self.http_proxy}')
options.set_preference('network.protocol-handler.external.tg', False)
try:
self.driver = webdriver.Firefox(options=options)
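A hedged usage sketch of the extended Webdriver context manager, assuming __enter__ yields the underlying Selenium driver as its return annotation suggests; the import path, URL, proxy address, and window dimensions are illustrative assumptions, not taken from the diff:

    from auto_archiver.utils import Webdriver  # import path assumed

    with Webdriver(width=1280, height=2300, timeout_seconds=120,
                   facebook_accept_cookies=False,
                   http_proxy="http://user:pass@proxy.example.com:8080") as driver:
        # get() and save_screenshot() are standard Selenium WebDriver calls
        driver.get("https://example.com")
        driver.save_screenshot("screenshot.png")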


@ -1,9 +1,9 @@
_MAJOR = "0"
_MINOR = "8"
_MINOR = "9"
# On main and in a nightly release the patch should be one ahead of the last
# released build.
_PATCH = "1"
_PATCH = "0"
# This is mainly for nightly builds which have the suffix ".dev$DATE". See
# https://semver.org/#is-v123-a-semantic-version for the semantics.
_SUFFIX = ""
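For reference, a hedged sketch of how these components are typically assembled into the package version string; the exact expression in version.py is outside this hunk and may differ:

    _MAJOR, _MINOR, _PATCH, _SUFFIX = "0", "9", "0", ""
    __version__ = f"{_MAJOR}.{_MINOR}.{_PATCH}{_SUFFIX}"
    assert __version__ == "0.9.0"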