Mirror of https://github.com/bellingcat/auto-archiver

Compare commits

41 commits

f941dfb146 ... c9f361efb4

Author | SHA1 | Date |
---|---|---|
msramalho | c9f361efb4 | |
msramalho | e9f2baca62 | |
R. Miles McCain | f603400d0d | |
msramalho | eb37f0b45b | |
msramalho | 75497f5773 | |
msramalho | 623e555713 | |
msramalho | 9c7824de57 | |
msramalho | f4827770e6 | |
msramalho | 601572d76e | |
msramalho | d21e79a272 | |
msramalho | ccf5f857ef | |
msramalho | 7de317d1b5 | |
msramalho | 70075a1e5e | |
msramalho | 5b9bc4919a | |
msramalho | f0158ffd9c | |
msramalho | bfb35a43a9 | |
msramalho | ef5b39c4f1 | |
msramalho | 24ceafcb64 | |
msramalho | 9fd4bb56a8 | |
msramalho | 5324d562ba | |
msramalho | 5bf0a0206d | |
msramalho | 4941823565 | |
msramalho | 27310c2911 | |
msramalho | eb973ba42d | |
Miguel Sozinho Ramalho | 7a21ae96af | |
msramalho | 5c49124ac6 | |
Kai | b9d71d0b3f | |
msramalho | b9b831ce03 | |
msramalho | 2a773a25e8 | |
msramalho | 719645fc2d | |
Chu-An, Huang | 71fcf5a089 | |
Tomas Apodaca | 590d3fe824 | |
Miguel Sozinho Ramalho | e6b6b83007 | |
msramalho | 499832d146 | |
msramalho | fa1163532b | |
msramalho | 96f6ea8f09 | |
Miguel Sozinho Ramalho | ff17dfd0aa | |
msramalho | 0a3053bbc7 | |
Miguel Sozinho Ramalho | e69660be82 | |
Miguel Sozinho Ramalho | a786d4bb0e | |
Miguel Sozinho Ramalho | 128d4136e3 |

@@ -8,9 +8,6 @@ name: Docker
on:
  release:
    types: [published]
  push:
    # branches: [ "main" ]
    tags: [ "v*.*.*" ]

env:
  # Use docker.io for Docker Hub if empty
@@ -11,9 +11,6 @@ name: Pypi
on:
  release:
    types: [published]
  push:
    # branches: [ "main" ]
    tags: [ "v*.*.*" ]

permissions:
  contents: read
@@ -27,4 +27,5 @@ instaloader.session
orchestration.yaml
auto_archiver.egg-info*
logs*
*.csv
archived/
@@ -1,4 +1,4 @@
-FROM webrecorder/browsertrix-crawler:latest
+FROM webrecorder/browsertrix-crawler:1.0.4

ENV RUNNING_IN_DOCKER=1
@@ -19,9 +19,8 @@ RUN pip install --upgrade pip && \

COPY Pipfile* ./
# install from pipenv, with browsertrix-only requirements
-RUN pipenv install && \
-    pipenv install pywb uwsgi
+RUN pipenv install

# doing this at the end helps during development, builds are quick
COPY ./src/ .
Pipfile — 5 changes
@@ -36,6 +36,11 @@ requests = {extras = ["socks"], version = "*"}
numpy = "*"
warcio = "*"
jsonlines = "*"
+pysubs2 = "*"
+minify-html = "*"
+retrying = "*"
+tsp-client = "*"
+certvalidator = "*"

[dev-packages]
autopep8 = "*"
(One diff omitted: file too large to display.)

README.md — 42 changes
@@ -177,9 +177,41 @@ To use Google Drive storage you need the id of the shared folder in the `config
#### Telethon + Instagram with telegram bot
The first time you run, you will be prompted to authenticate with the associated phone number; alternatively, you can put your `anon.session` in the root.

#### Atlos
When integrating with [Atlos](https://atlos.org), you will need to provide an API token in your configuration. You can learn more about Atlos and how to get an API token [here](https://docs.atlos.org/technical/api). You will have to provide this token to the `atlos_feeder`, `atlos_storage`, and `atlos_db` steps in your orchestration file. If you use a custom or self-hosted Atlos instance, you can also specify the `atlos_url` option to point to your custom instance's URL. For example:

```yaml
# orchestration.yaml content
steps:
  feeder: atlos_feeder
  archivers: # order matters
    - youtubedl_archiver
  enrichers:
    - thumbnail_enricher
    - hash_enricher
  formatter: html_formatter
  storages:
    - atlos_storage
  databases:
    - console_db
    - atlos_db

configurations:
  atlos_feeder:
    atlos_url: "https://platform.atlos.org" # optional
    api_token: "...your API token..."
  atlos_db:
    atlos_url: "https://platform.atlos.org" # optional
    api_token: "...your API token..."
  atlos_storage:
    atlos_url: "https://platform.atlos.org" # optional
    api_token: "...your API token..."
  hash_enricher:
    algorithm: "SHA-256"
```

## Running on Google Sheets Feeder (gsheet_feeder)
-The `--gseets_feeder.sheet` property is the name of the Google Sheet to check for URLs.
+The `--gsheet_feeder.sheet` property is the name of the Google Sheet to check for URLs.
This sheet must have been shared with the Google Service account used by `gspread`.
This sheet must also have specific columns (case-insensitive) in the `header` as specified in [Gsheet.configs](src/auto_archiver/utils/gsheet.py). The default names of these columns and their purposes are:
@@ -233,12 +265,12 @@ working with docker locally:
* to use local archive, also create a volume `-v` for it by adding `-v $PWD/local_archive:/app/local_archive`

-release to docker hub
+manual release to docker hub
* `docker image tag auto-archiver bellingcat/auto-archiver:latest`
* `docker push bellingcat/auto-archiver`

#### RELEASE
* update version in [version.py](src/auto_archiver/version.py)
-* run `bash ./scripts/release.sh` and confirm
-* package is automatically updated in pypi
-* docker image is automatically pushed to dockerhub
+* go to github releases > new release > use `vx.y.z` for matching version notation
+* package is automatically updated in pypi
+* docker image is automatically pushed to dockerhub
@@ -7,6 +7,7 @@ steps:
    # - telegram_archiver
    # - twitter_archiver
    # - twitter_api_archiver
+    # - instagram_api_archiver
    # - instagram_tbot_archiver
    # - instagram_archiver
    # - tiktok_archiver
@@ -1,19 +0,0 @@
#!/bin/bash

set -e

TAG=$(python -c 'from src.auto_archiver.version import __version__; print("v" + __version__)')

read -p "Creating new release for $TAG. Do you want to continue? [Y/n] " prompt

if [[ $prompt == "y" || $prompt == "Y" || $prompt == "yes" || $prompt == "Yes" ]]; then
  # git add -A
  # git commit -m "Bump version to $TAG for release" || true && git push
  echo "Creating new git tag $TAG"
  git tag "$TAG" -m "$TAG"
  git push --tags
else
  echo "Cancelled"
  exit 1
fi
@@ -7,4 +7,5 @@ from .instagram_tbot_archiver import InstagramTbotArchiver
from .tiktok_archiver import TiktokArchiver
from .telegram_archiver import TelegramArchiver
from .vk_archiver import VkArchiver
from .youtubedl_archiver import YoutubeDLArchiver
+from .instagram_api_archiver import InstagramAPIArchiver
@@ -3,6 +3,8 @@ from abc import abstractmethod
from dataclasses import dataclass
import os
import mimetypes, requests
from loguru import logger
from retrying import retry

from ..core import Metadata, Step, ArchivingContext
@@ -23,6 +25,10 @@ class Archiver(Step):
        # used when archivers need to login or do other one-time setup
        pass

    def cleanup(self) -> None:
        # called when archivers are done, or upon errors, cleanup any resources
        pass

    def sanitize_url(self, url: str) -> str:
        # used to clean unnecessary URL parameters OR unfurl redirect links
        return url
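For orientation, the lifecycle above (one-time `setup()`, per-item `download()`, URL cleanup via `sanitize_url()`, final `cleanup()`) can be illustrated with a standalone sketch; this is not code from the repository, only a hedged illustration of the contract:

```python
# Standalone illustration (not from the repo) of the Archiver lifecycle described above.
class MiniArchiver:
    def setup(self) -> None:
        print("one-time login / session preparation")

    def sanitize_url(self, url: str) -> str:
        # strip query parameters that do not affect the content
        return url.split("?")[0]

    def download(self, url: str) -> dict:
        return {"url": url, "status": "success"}

    def cleanup(self) -> None:
        print("release sessions / temporary files")

archiver = MiniArchiver()
archiver.setup()
print(archiver.download(archiver.sanitize_url("https://example.com/post?utm_source=x")))
archiver.cleanup()
```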
@@ -37,16 +43,17 @@ class Archiver(Step):
            return mime.split("/")[0]
        return ""

-    def download_from_url(self, url: str, to_filename: str = None, item: Metadata = None) -> str:
+    @retry(wait_random_min=500, wait_random_max=3500, stop_max_attempt_number=5)
+    def download_from_url(self, url: str, to_filename: str = None, verbose=True) -> str:
        """
-        downloads a URL to provided filename, or inferred from URL, returns local filename, if item is present will use its tmp_dir
+        downloads a URL to provided filename, or inferred from URL, returns local filename
        """
        if not to_filename:
            to_filename = url.split('/')[-1].split('?')[0]
            if len(to_filename) > 64:
                to_filename = to_filename[-64:]
-            if item:
-                to_filename = os.path.join(ArchivingContext.get_tmp_dir(), to_filename)
+            to_filename = os.path.join(ArchivingContext.get_tmp_dir(), to_filename)
        if verbose: logger.debug(f"downloading {url[0:50]=} {to_filename=}")
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
        }
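The `@retry` decorator added above comes from the `retrying` package; a minimal self-contained sketch of the same retry policy (random 500–3500 ms wait between attempts, at most 5 attempts), using a made-up flaky function in place of the real HTTP request:

```python
import random
from retrying import retry

@retry(wait_random_min=500, wait_random_max=3500, stop_max_attempt_number=5)
def flaky_download() -> str:
    # hypothetical stand-in for the real HTTP request in download_from_url
    if random.random() < 0.7:
        raise ConnectionError("transient network error")
    return "ok"

print(flaky_download())  # retried transparently until it returns or attempts run out
```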
@@ -0,0 +1,326 @@
import re, requests
from datetime import datetime
from loguru import logger
from retrying import retry
from tqdm import tqdm

from . import Archiver
from ..core import Metadata
from ..core import Media


class InstagramAPIArchiver(Archiver):
    """
    Uses an https://github.com/subzeroid/instagrapi API deployment to fetch instagram posts data

    # TODO: improvement collect aggregates of locations[0].location and mentions for all posts
    """
    name = "instagram_api_archiver"

    global_pattern = re.compile(r"(?:(?:http|https):\/\/)?(?:www.)?(?:instagram.com)\/(stories(?:\/highlights)?|p|reel)?\/?([^\/\?]*)\/?(\d+)?")

    def __init__(self, config: dict) -> None:
        super().__init__(config)
        self.assert_valid_string("access_token")
        self.assert_valid_string("api_endpoint")
        self.full_profile_max_posts = int(self.full_profile_max_posts)
        if self.api_endpoint[-1] == "/": self.api_endpoint = self.api_endpoint[:-1]

        self.full_profile = bool(self.full_profile)
        self.minimize_json_output = bool(self.minimize_json_output)

    @staticmethod
    def configs() -> dict:
        return {
            "access_token": {"default": None, "help": "a valid instagrapi-api token"},
            "api_endpoint": {"default": None, "help": "API endpoint to use"},
            "full_profile": {"default": False, "help": "if true, will download all posts, tagged posts, stories, and highlights for a profile, if false, will only download the profile pic and information."},
            "full_profile_max_posts": {"default": 0, "help": "Use to limit the number of posts to download when full_profile is true. 0 means no limit. limit is applied softly since posts are fetched in batch, once to: posts, tagged posts, and highlights"},
            "minimize_json_output": {"default": True, "help": "if true, will remove empty values from the json output"},
        }

    def download(self, item: Metadata) -> Metadata:
        url = item.get_url()

        url.replace("instagr.com", "instagram.com").replace("instagr.am", "instagram.com")
        insta_matches = self.global_pattern.findall(url)
        logger.info(f"{insta_matches=}")
        if not len(insta_matches) or len(insta_matches[0]) != 3: return
        if len(insta_matches) > 1:
            logger.warning(f"Multiple instagram matches found in {url=}, using the first one")
            return
        g1, g2, g3 = insta_matches[0][0], insta_matches[0][1], insta_matches[0][2]
        if g1 == "": return self.download_profile(item, g2)
        elif g1 == "p": return self.download_post(item, g2, context="post")
        elif g1 == "reel": return self.download_post(item, g2, context="reel")
        elif g1 == "stories/highlights": return self.download_highlights(item, g2)
        elif g1 == "stories":
            if len(g3): return self.download_post(item, id=g3, context="story")
            return self.download_stories(item, g2)
        else:
            logger.warning(f"Unknown instagram regex group match {g1=} found in {url=}")
            return

    @retry(wait_random_min=1000, wait_random_max=3000, stop_max_attempt_number=5)
    def call_api(self, path: str, params: dict) -> dict:
        headers = {
            "accept": "application/json",
            "x-access-key": self.access_token
        }
        logger.debug(f"calling {self.api_endpoint}/{path} with {params=}")
        return requests.get(f"{self.api_endpoint}/{path}", headers=headers, params=params).json()

    def cleanup_dict(self, d: dict | list) -> dict:
        # repeats 3 times to remove nested empty values
        if not self.minimize_json_output: return d
        if type(d) == list: return [self.cleanup_dict(v) for v in d]
        if type(d) != dict: return d
        return {
            k: clean_v
            for k, v in d.items()
            if (clean_v := self.cleanup_dict(v)) not in [0.0, 0, [], {}, "", None, "null"] and
            k not in ["x", "y", "width", "height"]
        }

    def download_profile(self, result: Metadata, username: str) -> Metadata:
        # download basic profile info
        url = result.get_url()
        user = self.call_api("v2/user/by/username", {"username": username}).get("user")
        assert user, f"User {username} not found"
        user = self.cleanup_dict(user)

        result.set_title(user.get("full_name", username)).set("data", user)
        if pic_url := user.get("profile_pic_url_hd", user.get("profile_pic_url")):
            filename = self.download_from_url(pic_url)
            result.add_media(Media(filename=filename), id=f"profile_picture")

        if self.full_profile:
            user_id = user.get("pk")
            # download all stories
            try:
                stories = self._download_stories_reusable(result, username)
                result.set("#stories", len(stories))
            except Exception as e:
                result.append("errors", f"Error downloading stories for {username}")
                logger.error(f"Error downloading stories for {username}: {e}")

            # download all posts
            try:
                self.download_all_posts(result, user_id)
            except Exception as e:
                result.append("errors", f"Error downloading posts for {username}")
                logger.error(f"Error downloading posts for {username}: {e}")

            # download all tagged
            try:
                self.download_all_tagged(result, user_id)
            except Exception as e:
                result.append("errors", f"Error downloading tagged posts for {username}")
                logger.error(f"Error downloading tagged posts for {username}: {e}")

            # download all highlights
            try:
                self.download_all_highlights(result, username, user_id)
            except Exception as e:
                result.append("errors", f"Error downloading highlights for {username}")
                logger.error(f"Error downloading highlights for {username}: {e}")

        result.set_url(url)  # reset as scrape_item modifies it
        return result.success("insta profile")

    def download_all_highlights(self, result, username, user_id):
        count_highlights = 0
        highlights = self.call_api(f"v1/user/highlights", {"user_id": user_id})
        for h in highlights:
            try:
                h_info = self._download_highlights_reusable(result, h.get("pk"))
                count_highlights += len(h_info.get("items", []))
            except Exception as e:
                result.append("errors", f"Error downloading highlight id{h.get('pk')} for {username}")
                logger.error(f"Error downloading highlight id{h.get('pk')} for {username}: {e}")
            if self.full_profile_max_posts and count_highlights >= self.full_profile_max_posts:
                logger.info(f"HIGHLIGHTS reached full_profile_max_posts={self.full_profile_max_posts}")
                break
        result.set("#highlights", count_highlights)

    def download_post(self, result: Metadata, code: str = None, id: str = None, context: str = None) -> Metadata:
        if id:
            post = self.call_api(f"v1/media/by/id", {"id": id})
        else:
            post = self.call_api(f"v1/media/by/code", {"code": code})
        assert post, f"Post {id or code} not found"

        if caption_text := post.get("caption_text"):
            result.set_title(caption_text)

        post = self.scrape_item(result, post, context)

        if post.get("taken_at"): result.set_timestamp(post.get("taken_at"))
        return result.success(f"insta {context or 'post'}")

    def download_highlights(self, result: Metadata, id: str) -> Metadata:
        h_info = self._download_highlights_reusable(result, id)
        items = len(h_info.get("items", []))
        del h_info["items"]
        result.set_title(h_info.get("title")).set("data", h_info).set("#reels", items)
        return result.success("insta highlights")

    def _download_highlights_reusable(self, result: Metadata, id: str) -> dict:
        full_h = self.call_api(f"v2/highlight/by/id", {"id": id})
        h_info = full_h.get("response", {}).get("reels", {}).get(f"highlight:{id}")
        assert h_info, f"Highlight {id} not found: {full_h=}"

        if cover_media := h_info.get("cover_media", {}).get("cropped_image_version", {}).get("url"):
            filename = self.download_from_url(cover_media)
            result.add_media(Media(filename=filename), id=f"cover_media highlight {id}")

        items = h_info.get("items", [])[::-1]  # newest to oldest
        for h in tqdm(items, desc="downloading highlights", unit="highlight"):
            try: self.scrape_item(result, h, "highlight")
            except Exception as e:
                result.append("errors", f"Error downloading highlight {h.get('id')}")
                logger.error(f"Error downloading highlight, skipping {h.get('id')}: {e}")

        return h_info

    def download_stories(self, result: Metadata, username: str) -> Metadata:
        now = datetime.now().strftime("%Y-%m-%d_%H-%M")
        stories = self._download_stories_reusable(result, username)
        if stories == []: return result.success("insta no story")
        result.set_title(f"stories {username} at {now}").set("#stories", len(stories))
        return result.success(f"insta stories {now}")

    def _download_stories_reusable(self, result: Metadata, username: str) -> list[dict]:
        stories = self.call_api(f"v1/user/stories/by/username", {"username": username})
        if not stories or not len(stories): return []
        stories = stories[::-1]  # newest to oldest

        for s in tqdm(stories, desc="downloading stories", unit="story"):
            try: self.scrape_item(result, s, "story")
            except Exception as e:
                result.append("errors", f"Error downloading story {s.get('id')}")
                logger.error(f"Error downloading story, skipping {s.get('id')}: {e}")
        return stories

    def download_all_posts(self, result: Metadata, user_id: str):
        end_cursor = None
        pbar = tqdm(desc="downloading posts")

        post_count = 0
        while end_cursor != "":
            posts = self.call_api(f"v1/user/medias/chunk", {"user_id": user_id, "end_cursor": end_cursor})
            if not len(posts) or not type(posts) == list or len(posts) != 2: break
            posts, end_cursor = posts[0], posts[1]
            logger.info(f"parsing {len(posts)} posts, next {end_cursor=}")

            for p in posts:
                try: self.scrape_item(result, p, "post")
                except Exception as e:
                    result.append("errors", f"Error downloading post {p.get('id')}")
                    logger.error(f"Error downloading post, skipping {p.get('id')}: {e}")
                pbar.update(1)
                post_count += 1
            if self.full_profile_max_posts and post_count >= self.full_profile_max_posts:
                logger.info(f"POSTS reached full_profile_max_posts={self.full_profile_max_posts}")
                break
        result.set("#posts", post_count)

    def download_all_tagged(self, result: Metadata, user_id: str):
        next_page_id = ""
        pbar = tqdm(desc="downloading tagged posts")

        tagged_count = 0
        while next_page_id != None:
            resp = self.call_api(f"v2/user/tag/medias", {"user_id": user_id, "page_id": next_page_id})
            posts = resp.get("response", {}).get("items", [])
            if not len(posts): break
            next_page_id = resp.get("next_page_id")

            logger.info(f"parsing {len(posts)} tagged posts, next {next_page_id=}")

            for p in posts:
                try: self.scrape_item(result, p, "tagged")
                except Exception as e:
                    result.append("errors", f"Error downloading tagged post {p.get('id')}")
                    logger.error(f"Error downloading tagged post, skipping {p.get('id')}: {e}")
                pbar.update(1)
                tagged_count += 1
            if self.full_profile_max_posts and tagged_count >= self.full_profile_max_posts:
                logger.info(f"TAGS reached full_profile_max_posts={self.full_profile_max_posts}")
                break
        result.set("#tagged", tagged_count)


    ### reusable parsing utils below

    def scrape_item(self, result: Metadata, item: dict, context: str = None) -> dict:
        """
        receives a Metadata and an API dict response
        fetches the media and adds it to the Metadata
        cleans and returns the API dict
        context can be used to give specific id prefixes to media
        """
        if "clips_metadata" in item:
            if reusable_text := item.get("clips_metadata", {}).get("reusable_text_attribute_string"):
                item["clips_metadata_text"] = reusable_text
            if self.minimize_json_output:
                del item["clips_metadata"]

        if code := item.get("code") and not result.get("url"):
            result.set_url(f"https://www.instagram.com/p/{code}/")

        resources = item.get("resources", item.get("carousel_media", []))
        item, media, media_id = self.scrape_media(item, context)
        # if resources are present take the main media from the first resource
        if not media and len(resources):
            _, media, media_id = self.scrape_media(resources[0], context)
            resources = resources[1:]

        assert media, f"Image/video not found in {item=}"

        # posts with multiple items contain a resources list
        resources_metadata = Metadata()
        for r in resources:
            self.scrape_item(resources_metadata, r)
        if not resources_metadata.is_empty():
            media.set("other media", resources_metadata.media)

        result.add_media(media, id=media_id)
        return item

    def scrape_media(self, item: dict, context: str) -> tuple[dict, Media, str]:
        # remove unnecessary info
        if self.minimize_json_output:
            for k in ["image_versions", "video_versions", "video_dash_manifest", "image_versions2", "video_versions2"]:
                if k in item: del item[k]
        item = self.cleanup_dict(item)

        image_media = None
        if image_url := item.get("thumbnail_url"):
            filename = self.download_from_url(image_url, verbose=False)
            image_media = Media(filename=filename)

        # retrieve video info
        best_id = item.get('id', item.get('pk'))
        taken_at = item.get("taken_at", item.get("taken_at_ts"))
        code = item.get("code")
        caption_text = item.get("caption_text")
        if "carousel_media" in item: del item["carousel_media"]

        if video_url := item.get("video_url"):
            filename = self.download_from_url(video_url, verbose=False)
            video_media = Media(filename=filename)
            if taken_at: video_media.set("date", taken_at)
            if code: video_media.set("url", f"https://www.instagram.com/p/{code}")
            if caption_text: video_media.set("text", caption_text)
            video_media.set("preview", [image_media])
            video_media.set("data", [item])
            return item, video_media, f"{context or 'video'} {best_id}"
        elif image_media:
            if taken_at: image_media.set("date", taken_at)
            if code: image_media.set("url", f"https://www.instagram.com/p/{code}")
            if caption_text: image_media.set("text", caption_text)
            image_media.set("data", [item])
            return item, image_media, f"{context or 'image'} {best_id}"

        return item, None, None
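To see how `global_pattern` drives the dispatch in `download()`, here is a standalone sketch with hypothetical URLs (the post code and story id below are made up); each tuple is (section, identifier, story id):

```python
import re

# same pattern as InstagramAPIArchiver.global_pattern
pattern = re.compile(r"(?:(?:http|https):\/\/)?(?:www.)?(?:instagram.com)\/(stories(?:\/highlights)?|p|reel)?\/?([^\/\?]*)\/?(\d+)?")

examples = [
    "https://www.instagram.com/bellingcat/",                # ('', 'bellingcat', '')             -> download_profile
    "https://www.instagram.com/p/EXAMPLECODE/",             # ('p', 'EXAMPLECODE', '')           -> download_post
    "https://www.instagram.com/stories/bellingcat/12345/",  # ('stories', 'bellingcat', '12345') -> single story
]
for url in examples:
    print(pattern.findall(url)[0])
```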
@@ -1,10 +1,12 @@
import shutil
from telethon.sync import TelegramClient
from loguru import logger
import time, os
from sqlite3 import OperationalError
from . import Archiver
from ..core import Metadata, Media, ArchivingContext
from ..utils import random_str


class InstagramTbotArchiver(Archiver):
@@ -20,10 +22,6 @@ class InstagramTbotArchiver(Archiver):
        self.assert_valid_string("api_id")
        self.assert_valid_string("api_hash")
        self.timeout = int(self.timeout)
-        try:
-            self.client = TelegramClient(self.session_file, self.api_id, self.api_hash)
-        except OperationalError as e:
-            logger.error(f"Unable to access the {self.session_file} session, please make sure you don't use the same session file here and in telethon_archiver. if you do then disable at least one of the archivers for the 1st time you setup telethon session: {e}")

    @staticmethod
    def configs() -> dict:
@@ -35,10 +33,30 @@ class InstagramTbotArchiver(Archiver):
        }

    def setup(self) -> None:
        """
        1. makes a copy of session_file that is removed in cleanup
        2. checks if the session file is valid
        """
        logger.info(f"SETUP {self.name} checking login...")

        # make a copy of the session that is used exclusively with this archiver instance
        new_session_file = os.path.join("secrets/", f"instabot-{time.strftime('%Y-%m-%d')}{random_str(8)}.session")
        shutil.copy(self.session_file + ".session", new_session_file)
        self.session_file = new_session_file.replace(".session", "")

        try:
            self.client = TelegramClient(self.session_file, self.api_id, self.api_hash)
        except OperationalError as e:
            logger.error(f"Unable to access the {self.session_file} session, please make sure you don't use the same session file here and in telethon_archiver. if you do then disable at least one of the archivers for the 1st time you setup telethon session: {e}")

        with self.client.start():
            logger.success(f"SETUP {self.name} login works.")

    def cleanup(self) -> None:
        logger.info(f"CLEANUP {self.name}.")
        if os.path.exists(self.session_file):
            os.remove(self.session_file)

    def download(self, item: Metadata) -> Metadata:
        url = item.get_url()
        if not "instagram.com" in url: return False
@@ -53,10 +53,10 @@ class TelegramArchiver(Archiver):

            if not len(image_urls): return False
            for img_url in image_urls:
-                result.add_media(Media(self.download_from_url(img_url, item=item)))
+                result.add_media(Media(self.download_from_url(img_url)))
        else:
            video_url = video.get('src')
-            m_video = Media(self.download_from_url(video_url, item=item))
+            m_video = Media(self.download_from_url(video_url))
            # extract duration from HTML
            try:
                duration = s.find_all('time')[0].contents[0]
@@ -1,4 +1,5 @@
+import shutil
from telethon.sync import TelegramClient
from telethon.errors import ChannelInvalidError
from telethon.tl.functions.messages import ImportChatInviteRequest
@@ -9,6 +10,7 @@ import re, time, json, os

from . import Archiver
from ..core import Metadata, Media, ArchivingContext
+from ..utils import random_str


class TelethonArchiver(Archiver):
@@ -21,8 +23,6 @@ class TelethonArchiver(Archiver):
        self.assert_valid_string("api_id")
        self.assert_valid_string("api_hash")

-        self.client = TelegramClient(self.session_file, self.api_id, self.api_hash)

    @staticmethod
    def configs() -> dict:
        return {
@@ -40,10 +40,20 @@ class TelethonArchiver(Archiver):

    def setup(self) -> None:
        """
-        1. trigger login process for telegram or proceed if already saved in a session file
-        2. joins channel_invites where needed
+        1. makes a copy of session_file that is removed in cleanup
+        2. trigger login process for telegram or proceed if already saved in a session file
+        3. joins channel_invites where needed
        """
        logger.info(f"SETUP {self.name} checking login...")

        # make a copy of the session that is used exclusively with this archiver instance
        new_session_file = os.path.join("secrets/", f"telethon-{time.strftime('%Y-%m-%d')}{random_str(8)}.session")
        shutil.copy(self.session_file + ".session", new_session_file)
        self.session_file = new_session_file.replace(".session", "")

        # initiate the client
        self.client = TelegramClient(self.session_file, self.api_id, self.api_hash)

        with self.client.start():
            logger.success(f"SETUP {self.name} login works.")
@@ -89,6 +99,11 @@ class TelethonArchiver(Archiver):
                i += 1
                pbar.update()

+    def cleanup(self) -> None:
+        logger.info(f"CLEANUP {self.name}.")
+        if os.path.exists(self.session_file):
+            os.remove(self.session_file)
+
    def download(self, item: Metadata) -> Metadata:
        """
        if this url is archivable will download post info and look for other posts from the same group with media.
@@ -137,7 +152,7 @@ class TelethonArchiver(Archiver):
                if len(other_media_urls):
                    logger.debug(f"Got {len(other_media_urls)} other media urls from {mp.id=}: {other_media_urls}")
                    for i, om_url in enumerate(other_media_urls):
-                        filename = self.download_from_url(om_url, f'{chat}_{group_id}_{i}', item)
+                        filename = self.download_from_url(om_url, f'{chat}_{group_id}_{i}')
                        result.add_media(Media(filename=filename), id=f"{group_id}_{i}")

                filename_dest = os.path.join(tmp_dir, f'{chat}_{group_id}', str(mp.id))
@@ -146,8 +161,10 @@ class TelethonArchiver(Archiver):
                    logger.debug(f"Empty media found, skipping {str(mp)=}")
                    continue
                result.add_media(Media(filename))

-        result.set_content(str(post)).set_title(title).set_timestamp(post.date)
+        result.set_title(title).set_timestamp(post.date).set("api_data", post.to_dict())
+        if post.message != title:
+            result.set_content(post.message)
        return result.success("telethon")

    def _get_media_posts_in_group(self, chat, original_post, max_amp=10):
@@ -16,36 +16,55 @@ class TwitterApiArchiver(TwitterArchiver, Archiver):
    def __init__(self, config: dict) -> None:
        super().__init__(config)

+        self.api_index = 0
+        self.apis = []
+        if len(self.bearer_tokens):
+            self.apis.extend([Api(bearer_token=bearer_token) for bearer_token in self.bearer_tokens])
        if self.bearer_token:
-            self.assert_valid_string("bearer_token")
-            self.api = Api(bearer_token=self.bearer_token)
-        elif self.consumer_key and self.consumer_secret and self.access_token and self.access_secret:
+            self.apis.append(Api(bearer_token=self.bearer_token))
+        if self.consumer_key and self.consumer_secret and self.access_token and self.access_secret:
            self.assert_valid_string("consumer_key")
            self.assert_valid_string("consumer_secret")
            self.assert_valid_string("access_token")
            self.assert_valid_string("access_secret")
-            self.api = Api(
-                consumer_key=self.consumer_key, consumer_secret=self.consumer_secret, access_token=self.access_token, access_secret=self.access_secret)
-        assert hasattr(self, "api") and self.api is not None, "Missing Twitter API configurations, please provide either bearer_token OR (consumer_key, consumer_secret, access_token, access_secret) to use this archiver."
+            self.apis.append(Api(consumer_key=self.consumer_key, consumer_secret=self.consumer_secret,
+                                 access_token=self.access_token, access_secret=self.access_secret))
+        assert self.api_client is not None, "Missing Twitter API configurations, please provide either AND/OR (consumer_key, consumer_secret, access_token, access_secret) to use this archiver, you can provide both for better rate-limit results."

    @staticmethod
    def configs() -> dict:
        return {
-            "bearer_token": {"default": None, "help": "twitter API bearer_token which is enough for archiving, if not provided you will need consumer_key, consumer_secret, access_token, access_secret"},
+            "bearer_token": {"default": None, "help": "[deprecated: see bearer_tokens] twitter API bearer_token which is enough for archiving, if not provided you will need consumer_key, consumer_secret, access_token, access_secret"},
+            "bearer_tokens": {"default": [], "help": " a list of twitter API bearer_token which is enough for archiving, if not provided you will need consumer_key, consumer_secret, access_token, access_secret, if provided you can still add those for better rate limits. CSV of bearer tokens if provided via the command line", "cli_set": lambda cli_val, cur_val: list(set(cli_val.split(",")))},
            "consumer_key": {"default": None, "help": "twitter API consumer_key"},
            "consumer_secret": {"default": None, "help": "twitter API consumer_secret"},
            "access_token": {"default": None, "help": "twitter API access_token"},
            "access_secret": {"default": None, "help": "twitter API access_secret"},
        }

+    @property  # getter .mimetype
+    def api_client(self) -> str:
+        return self.apis[self.api_index]
+
    def download(self, item: Metadata) -> Metadata:
+        # call download retry until success or no more apis
+        while self.api_index < len(self.apis):
+            if res := self.download_retry(item): return res
+            self.api_index += 1
+        self.api_index = 0
+        return False
+
+    def download_retry(self, item: Metadata) -> Metadata:
        url = item.get_url()
        # detect URLs that we definitely cannot handle
        username, tweet_id = self.get_username_tweet_id(url)
        if not username: return False

        try:
-            tweet = self.api.get_tweet(tweet_id, expansions=["attachments.media_keys"], media_fields=["type", "duration_ms", "url", "variants"], tweet_fields=["attachments", "author_id", "created_at", "entities", "id", "text", "possibly_sensitive"])
+            tweet = self.api_client.get_tweet(tweet_id, expansions=["attachments.media_keys"], media_fields=["type", "duration_ms", "url", "variants"], tweet_fields=["attachments", "author_id", "created_at", "entities", "id", "text", "possibly_sensitive"])
            logger.debug(tweet)
        except Exception as e:
            logger.error(f"Could not get tweet: {e}")
            return False
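The rewritten `download()` above rotates through the configured API clients until one succeeds; a standalone sketch of that failover pattern (the client class below is hypothetical, not the real `Api` client):

```python
class FakeClient:
    # hypothetical stand-in for a configured API client
    def __init__(self, works: bool): self.works = works
    def get_tweet(self, tweet_id: str): return {"id": tweet_id} if self.works else None

def download_with_failover(clients: list, tweet_id: str):
    for client in clients:  # mirrors api_index advancing after each failed client
        if (res := client.get_tweet(tweet_id)):
            return res
    return False

print(download_with_failover([FakeClient(False), FakeClient(True)], "123"))
```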
@@ -71,7 +90,7 @@ class TwitterApiArchiver(TwitterArchiver, Archiver):
                continue
            logger.info(f"Found media {media}")
            ext = mimetypes.guess_extension(mimetype)
-            media.filename = self.download_from_url(media.get("src"), f'{slugify(url)}_{i}{ext}', item)
+            media.filename = self.download_from_url(media.get("src"), f'{slugify(url)}_{i}{ext}')
            result.add_media(media)

        result.set_content(json.dumps({
@@ -82,7 +82,7 @@ class TwitterArchiver(Archiver):
                logger.warning(f"Could not get media URL of {tweet_media}")
                continue
            ext = mimetypes.guess_extension(mimetype)
-            media.filename = self.download_from_url(media.get("src"), f'{slugify(url)}_{i}{ext}', item)
+            media.filename = self.download_from_url(media.get("src"), f'{slugify(url)}_{i}{ext}')
            result.add_media(media)

        return result.success("twitter-snscrape")
@@ -124,7 +124,7 @@ class TwitterArchiver(Archiver):
            if (mtype := mimetypes.guess_type(UrlUtil.remove_get_parameters(u))[0]):
                ext = mimetypes.guess_extension(mtype)

-            media.filename = self.download_from_url(u, f'{slugify(url)}_{i}{ext}', item)
+            media.filename = self.download_from_url(u, f'{slugify(url)}_{i}{ext}')
            result.add_media(media)

        result.set_title(tweet.get("text")).set_content(json.dumps(tweet, ensure_ascii=False)).set_timestamp(datetime.strptime(tweet["created_at"], "%Y-%m-%dT%H:%M:%S.%fZ"))
@@ -1,4 +1,4 @@
-import datetime, os, yt_dlp
+import datetime, os, yt_dlp, pysubs2
from loguru import logger

from . import Archiver
@@ -10,38 +10,56 @@ class YoutubeDLArchiver(Archiver):

    def __init__(self, config: dict) -> None:
        super().__init__(config)
        self.subtitles = bool(self.subtitles)
        self.comments = bool(self.comments)
        self.livestreams = bool(self.livestreams)
        self.live_from_start = bool(self.live_from_start)
        self.end_means_success = bool(self.end_means_success)
        self.allow_playlist = bool(self.allow_playlist)
        self.max_downloads = self.max_downloads

    @staticmethod
    def configs() -> dict:
        return {
            "facebook_cookie": {"default": None, "help": "optional facebook cookie to have more access to content, from browser, looks like 'cookie: datr= xxxx'"},
            "subtitles": {"default": True, "help": "download subtitles if available"},
            "comments": {"default": False, "help": "download all comments if available, may lead to large metadata"},
            "livestreams": {"default": False, "help": "if set, will download live streams, otherwise will skip them; see --max-filesize for more control"},
            "live_from_start": {"default": False, "help": "if set, will download live streams from their earliest available moment, otherwise starts now."},
            "proxy": {"default": "", "help": "http/socks (https seems to not work atm) proxy to use for the webdriver, eg https://proxy-user:password@proxy-ip:port"},
            "end_means_success": {"default": True, "help": "if True, any archived content will mean a 'success', if False this archiver will not return a 'success' stage; this is useful for cases when the yt-dlp will archive a video but ignore other types of content like images or text only pages that the subsequent archivers can retrieve."},
            'allow_playlist': {"default": False, "help": "If True will also download playlists, set to False if the expectation is to download a single video."},
            "max_downloads": {"default": "inf", "help": "Use to limit the number of videos to download when a channel or long page is being extracted. 'inf' means no limit."},
        }

    def download(self, item: Metadata) -> Metadata:
        #TODO: yt-dlp for transcripts?
        url = item.get_url()

        if item.netloc in ['facebook.com', 'www.facebook.com'] and self.facebook_cookie:
            logger.debug('Using Facebook cookie')
            yt_dlp.utils.std_headers['cookie'] = self.facebook_cookie

-        ydl = yt_dlp.YoutubeDL({'outtmpl': os.path.join(ArchivingContext.get_tmp_dir(), f'%(id)s.%(ext)s'), 'quiet': False, 'noplaylist': True})
+        ydl_options = {'outtmpl': os.path.join(ArchivingContext.get_tmp_dir(), f'%(id)s.%(ext)s'), 'quiet': False, 'noplaylist': not self.allow_playlist, 'writesubtitles': self.subtitles, 'writeautomaticsub': self.subtitles, "live_from_start": self.live_from_start, "proxy": self.proxy, "max_downloads": self.max_downloads, "playlistend": self.max_downloads}
+        ydl = yt_dlp.YoutubeDL(ydl_options)  # allsubtitles and subtitleslangs not working as expected, so default lang is always "en"

        try:
-            # don'd download since it can be a live stream
+            # don't download since it can be a live stream
            info = ydl.extract_info(url, download=False)
-            if info.get('is_live', False):
-                logger.warning("Live streaming media, not archiving now")
+            if info.get('is_live', False) and not self.livestreams:
+                logger.warning("Livestream detected, skipping due to 'livestreams' configuration setting")
                return False
        except yt_dlp.utils.DownloadError as e:
            logger.debug(f'No video - Youtube normal control flow: {e}')
            return False
        except Exception as e:
-            logger.debug(f'ytdlp exception which is normal for example a facebook page with images only will cause a IndexError: list index out of range. Exception here is: \n {e}')
+            logger.debug(f'ytdlp exception which is normal for example a facebook page with images only will cause a IndexError: list index out of range. Exception is: \n {e}')
            return False

        # this time download
+        ydl = yt_dlp.YoutubeDL({**ydl_options, "getcomments": self.comments})
        #TODO: for playlist or long lists of videos, how to download one at a time so they can be stored before the next one is downloaded?
        info = ydl.extract_info(url, download=True)

        if "entries" in info:
            entries = info.get("entries", [])
            if not len(entries):
@@ -51,11 +69,37 @@ class YoutubeDLArchiver(Archiver):

        result = Metadata()
        result.set_title(info.get("title"))
        if "description" in info: result.set_content(info["description"])
        for entry in entries:
-            filename = ydl.prepare_filename(entry)
-            if not os.path.exists(filename):
-                filename = filename.split('.')[0] + '.mkv'
-            result.add_media(Media(filename).set("duration", info.get("duration")))
+            try:
+                filename = ydl.prepare_filename(entry)
+                if not os.path.exists(filename):
+                    filename = filename.split('.')[0] + '.mkv'
+
+                new_media = Media(filename)
+                for x in ["duration", "original_url", "fulltitle", "description", "upload_date"]:
+                    if x in entry: new_media.set(x, entry[x])
+
+                # read text from subtitles if enabled
+                if self.subtitles:
+                    for lang, val in (info.get('requested_subtitles') or {}).items():
+                        try:
+                            subs = pysubs2.load(val.get('filepath'), encoding="utf-8")
+                            text = " ".join([line.text for line in subs])
+                            new_media.set(f"subtitles_{lang}", text)
+                        except Exception as e:
+                            logger.error(f"Error loading subtitle file {val.get('filepath')}: {e}")
+                result.add_media(new_media)
+            except Exception as e:
+                logger.error(f"Error processing entry {entry}: {e}")

+        # extract comments if enabled
+        if self.comments:
+            result.set("comments", [{
+                "text": c["text"],
+                "author": c["author"],
+                "timestamp": datetime.datetime.utcfromtimestamp(c.get("timestamp")).replace(tzinfo=datetime.timezone.utc)
+            } for c in info.get("comments", [])])

        if (timestamp := info.get("timestamp")):
            timestamp = datetime.datetime.utcfromtimestamp(timestamp).replace(tzinfo=datetime.timezone.utc).isoformat()
@@ -64,4 +108,6 @@ class YoutubeDLArchiver(Archiver):
            upload_date = datetime.datetime.strptime(upload_date, '%Y%m%d').replace(tzinfo=datetime.timezone.utc)
            result.set("upload_date", upload_date)

-        return result.success("yt-dlp")
+        if self.end_means_success: result.success("yt-dlp")
+        else: result.status = "yt-dlp"
+        return result
@@ -1,6 +1,3 @@
-from loguru import logger
-
-
class ArchivingContext:
    """
    Singleton context class.
@@ -25,10 +25,11 @@ class Media:
    _mimetype: str = None  # eg: image/jpeg
    _stored: bool = field(default=False, repr=False, metadata=config(exclude=lambda _: True))  # always exclude

-    def store(self: Media, override_storages: List = None, url: str = "url-not-available"):
-        # stores the media into the provided/available storages [Storage]
-        # repeats the process for its properties, in case they have inner media themselves
-        # for now it only goes down 1 level but it's easy to make it recursive if needed
+    def store(self: Media, override_storages: List = None, url: str = "url-not-available", metadata: Any = None):
+        # 'Any' typing for metadata to avoid circular imports. Stores the media
+        # into the provided/available storages [Storage] repeats the process for
+        # its properties, in case they have inner media themselves for now it
+        # only goes down 1 level but it's easy to make it recursive if needed.
        storages = override_storages or ArchivingContext.get("storages")
        if not len(storages):
            logger.warning(f"No storages found in local context or provided directly for {self.filename}.")
@@ -36,7 +37,7 @@ class Media:

        for s in storages:
            for any_media in self.all_inner_media(include_self=True):
-                s.store(any_media, url)
+                s.store(any_media, url, metadata=metadata)

    def all_inner_media(self, include_self=False):
        """ Media can be inside media properties, examples include transformations on original media.
@@ -44,10 +45,14 @@ class Media:
        """
        if include_self: yield self
        for prop in self.properties.values():
-            if isinstance(prop, Media): yield prop
+            if isinstance(prop, Media):
+                for inner_media in prop.all_inner_media(include_self=True):
+                    yield inner_media
            if isinstance(prop, list):
                for prop_media in prop:
-                    if isinstance(prop_media, Media): yield prop_media
+                    if isinstance(prop_media, Media):
+                        for inner_media in prop_media.all_inner_media(include_self=True):
+                            yield inner_media

    def is_stored(self) -> bool:
        return len(self.urls) > 0 and len(self.urls) == len(ArchivingContext.get("storages"))
@@ -7,6 +7,8 @@ from dataclasses_json import dataclass_json, config
import datetime
from urllib.parse import urlparse
+from dateutil.parser import parse as parse_dt
+from loguru import logger

from .media import Media
from .context import ArchivingContext
@@ -46,12 +48,18 @@ class Metadata:
        self.remove_duplicate_media_by_hash()
        storages = override_storages or ArchivingContext.get("storages")
        for media in self.media:
-            media.store(override_storages=storages, url=self.get_url())
+            media.store(override_storages=storages, url=self.get_url(), metadata=self)

    def set(self, key: str, val: Any) -> Metadata:
        self.metadata[key] = val
        return self

+    def append(self, key: str, val: Any) -> Metadata:
+        if key not in self.metadata:
+            self.metadata[key] = []
+        self.metadata[key] = val
+        return self
+
    def get(self, key: str, default: Any = None, create_if_missing=False) -> Union[Metadata, str]:
        # goes through metadata and returns the Metadata available
        if create_if_missing and key not in self.metadata:
@@ -67,7 +75,8 @@ class Metadata:
        return "success" in self.status

    def is_empty(self) -> bool:
-        return not self.is_success() and len(self.media) == 0 and len(self.metadata) <= 2  # url, processed_at
+        meaningfull_ids = set(self.metadata.keys()) - set(["_processed_at", "url", "total_bytes", "total_size", "archive_duration_seconds"])
+        return not self.is_success() and len(self.media) == 0 and len(meaningfull_ids) == 0

    @property  # getter .netloc
    def netloc(self) -> str:
@@ -106,10 +115,15 @@ class Metadata:
    def get_timestamp(self, utc=True, iso=True) -> datetime.datetime:
        ts = self.get("timestamp")
        if not ts: return
-        if type(ts) == float: ts = datetime.datetime.fromtimestamp(ts)
-        if utc: ts = ts.replace(tzinfo=datetime.timezone.utc)
-        if iso: return ts.isoformat()
-        return ts
+        try:
+            if type(ts) == str: ts = datetime.datetime.fromisoformat(ts)
+            if type(ts) == float: ts = datetime.datetime.fromtimestamp(ts)
+            if utc: ts = ts.replace(tzinfo=datetime.timezone.utc)
+            if iso: return ts.isoformat()
+            return ts
+        except Exception as e:
+            logger.error(f"Unable to parse timestamp {ts}: {e}")
+            return

    def add_media(self, media: Media, id: str = None) -> Metadata:
        # adds a new media, optionally including an id
@@ -165,3 +179,16 @@ class Metadata:

    def __str__(self) -> str:
        return self.__repr__()

+    @staticmethod
+    def choose_most_complete(results: List[Metadata]) -> Metadata:
+        # returns the most complete result from a list of results
+        # prioritizes results with more media, then more metadata
+        if len(results) == 0: return None
+        if len(results) == 1: return results[0]
+        most_complete = results[0]
+        for r in results[1:]:
+            if len(r.media) > len(most_complete.media): most_complete = r
+            elif len(r.media) == len(most_complete.media) and len(r.metadata) > len(most_complete.metadata): most_complete = r
+        return most_complete
@@ -1,5 +1,7 @@
from __future__ import annotations
from typing import Generator, Union, List
from urllib.parse import urlparse
from ipaddress import ip_address

from .context import ArchivingContext
@@ -25,13 +27,28 @@ class ArchivingOrchestrator:
        self.storages: List[Storage] = config.storages
        ArchivingContext.set("storages", self.storages, keep_on_reset=True)

-        for a in self.archivers: a.setup()
+        try:
+            for a in self.all_archivers_for_setup(): a.setup()
+        except (KeyboardInterrupt, Exception) as e:
+            logger.error(f"Error during setup of archivers: {e}\n{traceback.format_exc()}")
+            self.cleanup()

+    def cleanup(self) -> None:
+        logger.info("Cleaning up")
+        for a in self.all_archivers_for_setup(): a.cleanup()
+
    def feed(self) -> Generator[Metadata]:
        for item in self.feeder:
            yield self.feed_item(item)
+        self.cleanup()

    def feed_item(self, item: Metadata) -> Metadata:
        """
        Takes one item (URL) to archive and calls self.archive, additionally:
        - catches keyboard interruptions to do a clean exit
        - catches any unexpected error, logs it, and does a clean exit
        """
        try:
            ArchivingContext.reset()
            with tempfile.TemporaryDirectory(dir="./") as tmp_dir:
@ -41,81 +58,101 @@ class ArchivingOrchestrator:
|
|||
# catches keyboard interruptions to do a clean exit
|
||||
logger.warning(f"caught interrupt on {item=}")
|
||||
for d in self.databases: d.aborted(item)
|
||||
self.cleanup()
|
||||
exit()
|
||||
except Exception as e:
|
||||
logger.error(f'Got unexpected error on item {item}: {e}\n{traceback.format_exc()}')
|
||||
for d in self.databases: d.failed(item)
|
||||
for d in self.databases:
|
||||
if type(e) == AssertionError: d.failed(item, str(e))
|
||||
else: d.failed(item)
|
||||
|
||||
# how does this handle the parameters like folder which can be different for each archiver?
|
||||
# the storage needs to know where to archive!!
|
||||
# solution: feeders have context: extra metadata that they can read or ignore,
|
||||
# all of it should have sensible defaults (eg: folder)
|
||||
# default feeder is a list with 1 element
|
||||
|
||||
def archive(self, result: Metadata) -> Union[Metadata, None]:
|
||||
original_url = result.get_url()
|
||||
"""
|
||||
Runs the archiving process for a single URL
|
||||
1. Each archiver can sanitize its own URLs
|
||||
2. Check for cached results in Databases, and signal start to the databases
|
||||
3. Call Archivers until one succeeds
|
||||
4. Call Enrichers
|
||||
5. Store all downloaded/generated media
|
||||
6. Call selected Formatter and store formatted if needed
|
||||
"""
|
||||
original_url = result.get_url().strip()
|
||||
self.assert_valid_url(original_url)
|
||||
|
||||
# 1 - cleanup
|
||||
# each archiver is responsible for cleaning/expanding its own URLs
|
||||
# 1 - sanitize - each archiver is responsible for cleaning/expanding its own URLs
|
||||
url = original_url
|
||||
for a in self.archivers: url = a.sanitize_url(url)
|
||||
result.set_url(url)
|
||||
if original_url != url: result.set("original_url", original_url)
|
||||
|
||||
# 2 - notify start to DB
|
||||
# signal to DB that archiving has started
|
||||
# and propagate already archived if it exists
|
||||
# 2 - notify start to DBs, propagate already archived if feature enabled in DBs
|
||||
cached_result = None
|
||||
for d in self.databases:
|
||||
# are the databases to decide whether to archive?
|
||||
# they can simply return True by default, otherwise they can avoid duplicates. should this logic be more granular, for example on the archiver level: a tweet will not need be scraped twice, whereas an instagram profile might. the archiver could not decide from the link which parts to archive,
|
||||
# instagram profile example: it would always re-archive everything
|
||||
# maybe the database/storage could use a hash/key to decide if there's a need to re-archive
|
||||
d.started(result)
|
||||
if (local_result := d.fetch(result)):
|
||||
cached_result = (cached_result or Metadata()).merge(local_result)
|
||||
if cached_result:
|
||||
logger.debug("Found previously archived entry")
|
||||
for d in self.databases:
|
||||
d.done(cached_result, cached=True)
|
||||
try: d.done(cached_result, cached=True)
|
||||
except Exception as e:
|
||||
logger.error(f"ERROR database {d.name}: {e}: {traceback.format_exc()}")
|
||||
return cached_result
|
||||
|
||||
# 3 - call archivers until one succeeds
|
||||
for a in self.archivers:
|
||||
logger.info(f"Trying archiver {a.name} for {url}")
|
||||
try:
|
||||
# Q: should this be refactored so it's just a.download(result)?
|
||||
result.merge(a.download(result))
|
||||
if result.is_success(): break
|
||||
except Exception as e: logger.error(f"Unexpected error with archiver {a.name}: {e}: {traceback.format_exc()}")
|
||||
except Exception as e:
|
||||
logger.error(f"ERROR archiver {a.name}: {e}: {traceback.format_exc()}")
|
||||
|
||||
# what if an archiver returns multiple entries and one is to be part of HTMLgenerator?
|
||||
# should it call the HTMLgenerator as if it's not an enrichment?
|
||||
# eg: if it is enable: generates an HTML with all the returned media, should it include enrichers? yes
|
||||
# then how to execute it last? should there also be post-processors? are there other examples?
|
||||
# maybe as a PDF? or a Markdown file
|
||||
|
||||
# 4 - call enrichers: have access to archived content, can generate metadata and Media
|
||||
# eg: screenshot, wacz, webarchive, thumbnails
|
||||
# 4 - call enrichers to work with archived content
|
||||
for e in self.enrichers:
|
||||
try: e.enrich(result)
|
||||
except Exception as exc: logger.error(f"Unexpected error with enricher {e.name}: {exc}: {traceback.format_exc()}")
|
||||
except Exception as exc:
|
||||
logger.error(f"ERROR enricher {e.name}: {exc}: {traceback.format_exc()}")
|
||||
|
||||
# 5 - store media
|
||||
# looks for Media in result.media and also result.media[x].properties (as list or dict values)
|
||||
# 5 - store all downloaded/generated media
|
||||
result.store()
|
||||
|
||||
|
||||
# 6 - format and store formatted if needed
|
||||
# enrichers typically need access to already stored URLs etc
|
||||
if (final_media := self.formatter.format(result)):
|
||||
final_media.store(url=url)
|
||||
final_media.store(url=url, metadata=result)
|
||||
result.set_final_media(final_media)
|
||||
|
||||
if result.is_empty():
|
||||
result.status = "nothing archived"
|
||||
|
||||
# signal completion to databases (DBs, Google Sheets, CSV, ...)
|
||||
for d in self.databases: d.done(result)
|
||||
# signal completion to databases and archivers
|
||||
for d in self.databases:
|
||||
try: d.done(result)
|
||||
except Exception as e:
|
||||
logger.error(f"ERROR database {d.name}: {e}: {traceback.format_exc()}")
|
||||
|
||||
return result
|
||||
|
||||
def assert_valid_url(self, url: str) -> bool:
|
||||
"""
|
||||
Blocks localhost, private, reserved, and link-local IPs and all non-http/https schemes.
|
||||
"""
|
||||
assert url.startswith("http://") or url.startswith("https://"), f"Invalid URL scheme"
|
||||
|
||||
parsed = urlparse(url)
|
||||
assert parsed.scheme in ["http", "https"], f"Invalid URL scheme"
|
||||
assert parsed.hostname, f"Invalid URL hostname"
|
||||
assert parsed.hostname != "localhost", f"Invalid URL"
|
||||
|
||||
try: # special rules for IP addresses
|
||||
ip = ip_address(parsed.hostname)
|
||||
except ValueError: pass
|
||||
else:
|
||||
assert ip.is_global, f"Invalid IP used"
|
||||
assert not ip.is_reserved, f"Invalid IP used"
|
||||
assert not ip.is_link_local, f"Invalid IP used"
|
||||
assert not ip.is_private, f"Invalid IP used"
|
||||
|
||||
def all_archivers_for_setup(self) -> List[Archiver]:
|
||||
return self.archivers + [e for e in self.enrichers if isinstance(e, Archiver)]
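A minimal sketch of how the `assert_valid_url` checks above behave on a few hypothetical URLs (the URLs and the simplified boolean logic are illustrative only, not part of the orchestrator):

```python
from ipaddress import ip_address
from urllib.parse import urlparse

# Hypothetical URLs illustrating the scheme, hostname and IP-class checks above.
examples = [
    "https://example.com/post/1",   # passes: http(s) scheme, public hostname
    "ftp://example.com/file",       # blocked: non-http(s) scheme
    "http://localhost:8000",        # blocked: localhost
    "http://192.168.1.10/admin",    # blocked: private IP
    "http://169.254.0.1/",          # blocked: link-local IP
]
for url in examples:
    parsed = urlparse(url)
    ok = parsed.scheme in ["http", "https"] and parsed.hostname not in [None, "localhost"]
    try:
        ip = ip_address(parsed.hostname)
    except ValueError:
        pass  # not an IP literal, the hostname rules above already applied
    else:
        ok = ok and ip.is_global and not (ip.is_reserved or ip.is_link_local or ip.is_private)
    print(url, "OK" if ok else "blocked")
```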
|
|
@ -2,4 +2,5 @@ from .database import Database
|
|||
from .gsheet_db import GsheetsDb
|
||||
from .console_db import ConsoleDb
|
||||
from .csv_db import CSVDb
|
||||
from .api_db import AAApiDb
|
||||
from .api_db import AAApiDb
|
||||
from .atlos_db import AtlosDb
|
|
@ -16,51 +16,53 @@ class AAApiDb(Database):
|
|||
# without this STEP.__init__ is not called
|
||||
super().__init__(config)
|
||||
self.allow_rearchive = bool(self.allow_rearchive)
|
||||
self.store_results = bool(self.store_results)
|
||||
self.assert_valid_string("api_endpoint")
|
||||
self.assert_valid_string("api_secret")
|
||||
|
||||
@staticmethod
|
||||
def configs() -> dict:
|
||||
return {
|
||||
"api_endpoint": {"default": None, "help": "API endpoint where calls are made to"},
|
||||
"api_secret": {"default": None, "help": "API Basic authentication secret [deprecating soon]"},
|
||||
"api_token": {"default": None, "help": "API Bearer token, to be preferred over secret (Basic auth) going forward"},
|
||||
"api_token": {"default": None, "help": "API Bearer token."},
|
||||
"public": {"default": False, "help": "whether the URL should be publicly available via the API"},
|
||||
"author_id": {"default": None, "help": "which email to assign as author"},
|
||||
"group_id": {"default": None, "help": "which group of users have access to the archive in case public=false as author"},
|
||||
"allow_rearchive": {"default": True, "help": "if False then the API database will be queried prior to any archiving operations and stop if the link has already been archived"},
|
||||
"store_results": {"default": True, "help": "when set, will send the results to the API database."},
|
||||
"tags": {"default": [], "help": "what tags to add to the archived URL", "cli_set": lambda cli_val, cur_val: set(cli_val.split(","))},
|
||||
}
|
||||
def fetch(self, item: Metadata) -> Union[Metadata, bool]:
|
||||
""" query the database for the existence of this item"""
|
||||
if not self.allow_rearchive: return
|
||||
|
||||
params = {"url": item.get_url(), "limit": 1}
|
||||
params = {"url": item.get_url(), "limit": 15}
|
||||
headers = {"Authorization": f"Bearer {self.api_token}", "accept": "application/json"}
|
||||
response = requests.get(os.path.join(self.api_endpoint, "tasks/search-url"), params=params, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
logger.success(f"API returned a previously archived instance: {response.json()}")
|
||||
# TODO: can we do better than just returning the most recent result?
|
||||
return Metadata.from_dict(response.json()[0]["result"])
|
||||
|
||||
logger.error(f"AA API FAIL ({response.status_code}): {response.json()}")
|
||||
if len(response.json()):
|
||||
logger.success(f"API returned {len(response.json())} previously archived instance(s)")
|
||||
fetched_metadata = [Metadata.from_dict(r["result"]) for r in response.json()]
|
||||
return Metadata.choose_most_complete(fetched_metadata)
|
||||
else:
|
||||
logger.error(f"AA API FAIL ({response.status_code}): {response.json()}")
|
||||
return False
|
||||
|
||||
|
||||
def done(self, item: Metadata, cached: bool=False) -> None:
|
||||
"""archival result ready - should be saved to DB"""
|
||||
if not self.store_results: return
|
||||
if cached:
|
||||
logger.debug(f"skipping saving archive of {item.get_url()} to the AA API because it was cached")
|
||||
return
|
||||
logger.debug(f"saving archive of {item.get_url()} to the AA API.")
|
||||
|
||||
payload = {'result': item.to_json(), 'public': self.public, 'author_id': self.author_id, 'group_id': self.group_id, 'tags': list(self.tags)}
|
||||
response = requests.post(os.path.join(self.api_endpoint, "submit-archive"), json=payload, auth=("abc", self.api_secret))
|
||||
headers = {"Authorization": f"Bearer {self.api_token}"}
|
||||
response = requests.post(os.path.join(self.api_endpoint, "submit-archive"), json=payload, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
logger.success(f"AA API: {response.json()}")
|
||||
else:
|
||||
logger.error(f"AA API FAIL ({response.status_code}): {response.json()}")
|
||||
|
||||
|
|
@ -0,0 +1,79 @@
|
|||
import os
|
||||
from typing import Union
|
||||
from loguru import logger
|
||||
from csv import DictWriter
|
||||
from dataclasses import asdict
|
||||
import requests
|
||||
|
||||
from . import Database
|
||||
from ..core import Metadata
|
||||
from ..utils import get_atlos_config_options
|
||||
|
||||
|
||||
class AtlosDb(Database):
|
||||
"""
|
||||
Outputs results to Atlos
|
||||
"""
|
||||
|
||||
name = "atlos_db"
|
||||
|
||||
def __init__(self, config: dict) -> None:
|
||||
# without this STEP.__init__ is not called
|
||||
super().__init__(config)
|
||||
|
||||
@staticmethod
|
||||
def configs() -> dict:
|
||||
return get_atlos_config_options()
|
||||
|
||||
def failed(self, item: Metadata, reason: str) -> None:
|
||||
"""Update DB accordingly for failure"""
|
||||
# If the item has no Atlos ID, there's nothing for us to do
|
||||
if not item.metadata.get("atlos_id"):
|
||||
logger.info(f"Item {item.get_url()} has no Atlos ID, skipping")
|
||||
return
|
||||
|
||||
requests.post(
|
||||
f"{self.atlos_url}/api/v2/source_material/metadata/{item.metadata['atlos_id']}/auto_archiver",
|
||||
headers={"Authorization": f"Bearer {self.api_token}"},
|
||||
json={"metadata": {"processed": True, "status": "error", "error": reason}},
|
||||
).raise_for_status()
|
||||
logger.info(
|
||||
f"Stored failure for {item.get_url()} (ID {item.metadata['atlos_id']}) on Atlos: {reason}"
|
||||
)
|
||||
|
||||
def fetch(self, item: Metadata) -> Union[Metadata, bool]:
|
||||
"""check and fetch if the given item has been archived already, each
|
||||
database should handle its own caching, and configuration mechanisms"""
|
||||
return False
|
||||
|
||||
def _process_metadata(self, item: Metadata) -> dict:
|
||||
"""Process metadata for storage on Atlos. Will convert any datetime
|
||||
objects to ISO format."""
|
||||
|
||||
return {
|
||||
k: v.isoformat() if hasattr(v, "isoformat") else v
|
||||
for k, v in item.metadata.items()
|
||||
}
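A short illustration of what `_process_metadata` does to a metadata dict (the keys and values are made up):

```python
import datetime

# Illustrative input: any value with an .isoformat() method (dates, datetimes) is serialised.
metadata = {"url": "https://example.com", "_processed_at": datetime.datetime(2024, 1, 2, 3, 4, 5)}
processed = {k: v.isoformat() if hasattr(v, "isoformat") else v for k, v in metadata.items()}
print(processed)   # {'url': 'https://example.com', '_processed_at': '2024-01-02T03:04:05'}
```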
|
||||
|
||||
def done(self, item: Metadata, cached: bool = False) -> None:
|
||||
"""archival result ready - should be saved to DB"""
|
||||
|
||||
if not item.metadata.get("atlos_id"):
|
||||
logger.info(f"Item {item.get_url()} has no Atlos ID, skipping")
|
||||
return
|
||||
|
||||
requests.post(
|
||||
f"{self.atlos_url}/api/v2/source_material/metadata/{item.metadata['atlos_id']}/auto_archiver",
|
||||
headers={"Authorization": f"Bearer {self.api_token}"},
|
||||
json={
|
||||
"metadata": dict(
|
||||
processed=True,
|
||||
status="success",
|
||||
results=self._process_metadata(item),
|
||||
)
|
||||
},
|
||||
).raise_for_status()
|
||||
|
||||
logger.info(
|
||||
f"Stored success for {item.get_url()} (ID {item.metadata['atlos_id']}) on Atlos"
|
||||
)
|
|
@ -21,8 +21,8 @@ class ConsoleDb(Database):
|
|||
def started(self, item: Metadata) -> None:
|
||||
logger.warning(f"STARTED {item}")
|
||||
|
||||
def failed(self, item: Metadata) -> None:
|
||||
logger.error(f"FAILED {item}")
|
||||
def failed(self, item: Metadata, reason:str) -> None:
|
||||
logger.error(f"FAILED {item}: {reason}")
|
||||
|
||||
def aborted(self, item: Metadata) -> None:
|
||||
logger.warning(f"ABORTED {item}")
|
||||
|
|
|
@ -22,7 +22,7 @@ class Database(Step, ABC):
|
|||
"""signals the DB that the given item archival has started"""
|
||||
pass
|
||||
|
||||
def failed(self, item: Metadata) -> None:
|
||||
def failed(self, item: Metadata, reason:str) -> None:
|
||||
"""update DB accordingly for failure"""
|
||||
pass
|
||||
|
||||
|
@ -32,7 +32,7 @@ class Database(Step, ABC):
|
|||
|
||||
# @abstractmethod
|
||||
def fetch(self, item: Metadata) -> Union[Metadata, bool]:
|
||||
"""check if the given item has been archived already"""
|
||||
"""check and fetch if the given item has been archived already, each database should handle its own caching, and configuration mechanisms"""
|
||||
return False
|
||||
|
||||
@abstractmethod
|
||||
|
|
|
@ -29,9 +29,9 @@ class GsheetsDb(Database):
|
|||
gw, row = self._retrieve_gsheet(item)
|
||||
gw.set_cell(row, 'status', 'Archive in progress')
|
||||
|
||||
def failed(self, item: Metadata) -> None:
|
||||
def failed(self, item: Metadata, reason:str) -> None:
|
||||
logger.error(f"FAILED {item}")
|
||||
self._safe_status_update(item, 'Archive failed')
|
||||
self._safe_status_update(item, f'Archive failed {reason}')
|
||||
|
||||
def aborted(self, item: Metadata) -> None:
|
||||
logger.warning(f"ABORTED {item}")
|
||||
|
@ -102,6 +102,11 @@ class GsheetsDb(Database):
|
|||
|
||||
def _retrieve_gsheet(self, item: Metadata) -> Tuple[GWorksheet, int]:
|
||||
# TODO: to make gsheet_db less coupled with gsheet_feeder's "gsheet" parameter, this method could 1st try to fetch "gsheet" from ArchivingContext and, if missing, manage its own singleton - not needed for now
|
||||
gw: GWorksheet = ArchivingContext.get("gsheet").get("worksheet")
|
||||
row: int = ArchivingContext.get("gsheet").get("row")
|
||||
if gsheet := ArchivingContext.get("gsheet"):
|
||||
gw: GWorksheet = gsheet.get("worksheet")
|
||||
row: int = gsheet.get("row")
|
||||
elif self.sheet_id:
|
||||
print(self.sheet_id)
|
||||
|
||||
|
||||
return gw, row
|
||||
|
|
|
@ -6,4 +6,7 @@ from .thumbnail_enricher import ThumbnailEnricher
|
|||
from .wacz_enricher import WaczArchiverEnricher
|
||||
from .whisper_enricher import WhisperEnricher
|
||||
from .pdq_hash_enricher import PdqHashEnricher
|
||||
from .metadata_enricher import MetadataEnricher
|
||||
from .metadata_enricher import MetadataEnricher
|
||||
from .meta_enricher import MetaEnricher
|
||||
from .ssl_enricher import SSLEnricher
|
||||
from .timestamping_enricher import TimestampingEnricher
|
|
@ -0,0 +1,59 @@
|
|||
import datetime
|
||||
import os
|
||||
from loguru import logger
|
||||
|
||||
from . import Enricher
|
||||
from ..core import Metadata
|
||||
|
||||
|
||||
class MetaEnricher(Enricher):
|
||||
"""
|
||||
Adds metadata about the archive operation itself (total file sizes and archive duration), to be included at the end of all enrichments
|
||||
"""
|
||||
name = "meta_enricher"
|
||||
|
||||
|
||||
def __init__(self, config: dict) -> None:
|
||||
# without this STEP.__init__ is not called
|
||||
super().__init__(config)
|
||||
|
||||
@staticmethod
|
||||
def configs() -> dict:
|
||||
return {}
|
||||
|
||||
def enrich(self, to_enrich: Metadata) -> None:
|
||||
url = to_enrich.get_url()
|
||||
if to_enrich.is_empty():
|
||||
logger.debug(f"[SKIP] META_ENRICHER there is no media or metadata to enrich: {url=}")
|
||||
return
|
||||
|
||||
logger.debug(f"calculating archive metadata information for {url=}")
|
||||
|
||||
self.enrich_file_sizes(to_enrich)
|
||||
self.enrich_archive_duration(to_enrich)
|
||||
|
||||
def enrich_file_sizes(self, to_enrich: Metadata):
|
||||
logger.debug(f"calculating archive file sizes for url={to_enrich.get_url()} ({len(to_enrich.media)} media files)")
|
||||
total_size = 0
|
||||
for media in to_enrich.get_all_media():
|
||||
file_stats = os.stat(media.filename)
|
||||
media.set("bytes", file_stats.st_size)
|
||||
media.set("size", self.human_readable_bytes(file_stats.st_size))
|
||||
total_size += file_stats.st_size
|
||||
|
||||
to_enrich.set("total_bytes", total_size)
|
||||
to_enrich.set("total_size", self.human_readable_bytes(total_size))
|
||||
|
||||
|
||||
def human_readable_bytes(self, size: int) -> str:
|
||||
# receives a number of bytes and returns a human readable size
|
||||
for unit in ["bytes", "KB", "MB", "GB", "TB"]:
|
||||
if size < 1024:
|
||||
return f"{size:.1f} {unit}"
|
||||
size /= 1024
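A quick sanity check of the conversion above, as a standalone copy of the method (the input values are illustrative):

```python
def human_readable_bytes(size: int) -> str:
    # standalone copy of the method above, for illustration only
    for unit in ["bytes", "KB", "MB", "GB", "TB"]:
        if size < 1024:
            return f"{size:.1f} {unit}"
        size /= 1024

assert human_readable_bytes(500) == "500.0 bytes"
assert human_readable_bytes(2048) == "2.0 KB"
assert human_readable_bytes(5_000_000) == "4.8 MB"
```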
|
||||
|
||||
def enrich_archive_duration(self, to_enrich):
|
||||
logger.debug(f"calculating archive duration for url={to_enrich.get_url()} ")
|
||||
|
||||
archive_duration = datetime.datetime.utcnow() - to_enrich.get("_processed_at")
|
||||
to_enrich.set("archive_duration_seconds", archive_duration.seconds)
|
|
@ -16,7 +16,8 @@ class ScreenshotEnricher(Enricher):
|
|||
"width": {"default": 1280, "help": "width of the screenshots"},
|
||||
"height": {"default": 720, "help": "height of the screenshots"},
|
||||
"timeout": {"default": 60, "help": "timeout for taking the screenshot"},
|
||||
"sleep_before_screenshot": {"default": 4, "help": "seconds to wait for the pages to load before taking screenshot"}
|
||||
"sleep_before_screenshot": {"default": 4, "help": "seconds to wait for the pages to load before taking screenshot"},
|
||||
"http_proxy": {"default": "", "help": "http proxy to use for the webdriver, eg http://proxy-user:password@proxy-ip:port"},
|
||||
}
|
||||
|
||||
def enrich(self, to_enrich: Metadata) -> None:
|
||||
|
@ -26,7 +27,7 @@ class ScreenshotEnricher(Enricher):
|
|||
return
|
||||
|
||||
logger.debug(f"Enriching screenshot for {url=}")
|
||||
with Webdriver(self.width, self.height, self.timeout, 'facebook.com' in url) as driver:
|
||||
with Webdriver(self.width, self.height, self.timeout, 'facebook.com' in url, http_proxy=self.http_proxy) as driver:
|
||||
try:
|
||||
driver.get(url)
|
||||
time.sleep(int(self.sleep_before_screenshot))
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
import ssl, os
|
||||
from slugify import slugify
|
||||
from urllib.parse import urlparse
|
||||
from loguru import logger
|
||||
|
||||
from . import Enricher
|
||||
from ..core import Metadata, ArchivingContext, Media
|
||||
|
||||
|
||||
class SSLEnricher(Enricher):
|
||||
"""
|
||||
Retrieves SSL certificate information for a domain, as a file
|
||||
"""
|
||||
name = "ssl_enricher"
|
||||
|
||||
def __init__(self, config: dict) -> None:
|
||||
super().__init__(config)
|
||||
self.skip_when_nothing_archived = bool(self.skip_when_nothing_archived)
|
||||
|
||||
@staticmethod
|
||||
def configs() -> dict:
|
||||
return {
|
||||
"skip_when_nothing_archived": {"default": True, "help": "if true, will skip enriching when no media is archived"},
|
||||
}
|
||||
|
||||
def enrich(self, to_enrich: Metadata) -> None:
|
||||
if not to_enrich.media and self.skip_when_nothing_archived: return
|
||||
|
||||
url = to_enrich.get_url()
|
||||
parsed = urlparse(url)
|
||||
assert parsed.scheme in ["https"], f"Invalid URL scheme {url=}"
|
||||
|
||||
domain = parsed.netloc
|
||||
logger.debug(f"fetching SSL certificate for {domain=} in {url=}")
|
||||
|
||||
cert = ssl.get_server_certificate((domain, 443))
|
||||
cert_fn = os.path.join(ArchivingContext.get_tmp_dir(), f"{slugify(domain)}.pem")
|
||||
with open(cert_fn, "w") as f: f.write(cert)
|
||||
to_enrich.add_media(Media(filename=cert_fn), id="ssl_certificate")
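For reference, the same stdlib call can be tried in isolation (example.com is a placeholder domain):

```python
import ssl

# example.com is a placeholder; this is the same stdlib call used in enrich() above.
pem = ssl.get_server_certificate(("example.com", 443))
print(pem.splitlines()[0])   # -----BEGIN CERTIFICATE-----
```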
|
|
@ -15,32 +15,54 @@ class ThumbnailEnricher(Enricher):
|
|||
def __init__(self, config: dict) -> None:
|
||||
# without this STEP.__init__ is not called
|
||||
super().__init__(config)
|
||||
self.thumbnails_per_second = int(self.thumbnails_per_minute) / 60
|
||||
self.max_thumbnails = int(self.max_thumbnails)
|
||||
|
||||
@staticmethod
|
||||
def configs() -> dict:
|
||||
return {}
|
||||
|
||||
return {
|
||||
"thumbnails_per_minute": {"default": 60, "help": "how many thumbnails to generate per minute of video, can be limited by max_thumbnails"},
|
||||
"max_thumbnails": {"default": 16, "help": "limit the number of thumbnails to generate per video, 0 means no limit"},
|
||||
}
|
||||
|
||||
def enrich(self, to_enrich: Metadata) -> None:
|
||||
logger.debug(f"generating thumbnails")
|
||||
for i, m in enumerate(to_enrich.media[::]):
|
||||
"""
|
||||
Uses or reads the video duration to generate thumbnails
|
||||
Calculates how many thumbnails to generate and at which timestamps based on the video duration, the number of thumbnails per minute and the max number of thumbnails.
|
||||
Thumbnails are equally distributed across the video duration.
|
||||
"""
|
||||
logger.debug(f"generating thumbnails for {to_enrich.get_url()}")
|
||||
for m_id, m in enumerate(to_enrich.media[::]):
|
||||
if m.is_video():
|
||||
folder = os.path.join(ArchivingContext.get_tmp_dir(), random_str(24))
|
||||
os.makedirs(folder, exist_ok=True)
|
||||
logger.debug(f"generating thumbnails for {m.filename}")
|
||||
fps, duration = 0.5, m.get("duration")
|
||||
if duration is not None:
|
||||
duration = float(duration)
|
||||
if duration < 60: fps = 10.0 / duration
|
||||
elif duration < 120: fps = 20.0 / duration
|
||||
else: fps = 40.0 / duration
|
||||
duration = m.get("duration")
|
||||
|
||||
stream = ffmpeg.input(m.filename)
|
||||
stream = ffmpeg.filter(stream, 'fps', fps=fps).filter('scale', 512, -1)
|
||||
stream.output(os.path.join(folder, 'out%d.jpg')).run()
|
||||
if duration is None:
|
||||
try:
|
||||
probe = ffmpeg.probe(m.filename)
|
||||
duration = float(next(stream for stream in probe['streams'] if stream['codec_type'] == 'video')['duration'])
|
||||
to_enrich.media[m_id].set("duration", duration)
|
||||
except Exception as e:
|
||||
logger.error(f"error getting duration of video {m.filename}: {e}")
|
||||
return
|
||||
|
||||
num_thumbs = int(min(max(1, duration * self.thumbnails_per_second), self.max_thumbnails))
|
||||
timestamps = [duration / (num_thumbs + 1) * i for i in range(1, num_thumbs + 1)]
|
||||
|
||||
thumbnails = os.listdir(folder)
|
||||
thumbnails_media = []
|
||||
for t, fname in enumerate(thumbnails):
|
||||
if fname[-3:] == 'jpg':
|
||||
thumbnails_media.append(Media(filename=os.path.join(folder, fname)).set("id", f"thumbnail_{t}"))
|
||||
to_enrich.media[i].set("thumbnails", thumbnails_media)
|
||||
for index, timestamp in enumerate(timestamps):
|
||||
output_path = os.path.join(folder, f"out{index}.jpg")
|
||||
ffmpeg.input(m.filename, ss=timestamp).filter('scale', 512, -1).output(output_path, vframes=1, loglevel="quiet").run()
|
||||
|
||||
try:
|
||||
thumbnails_media.append(Media(
|
||||
filename=output_path)
|
||||
.set("id", f"thumbnail_{index}")
|
||||
.set("timestamp", "%.3fs" % timestamp)
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"error creating thumbnail {index} for media: {e}")
|
||||
|
||||
to_enrich.media[m_id].set("thumbnails", thumbnails_media)
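A worked example of the thumbnail count and timestamp calculation above, assuming the default settings (thumbnails_per_minute=60, max_thumbnails=16) and a hypothetical 150-second video:

```python
# Assumed defaults from configs() above: 60 thumbnails per minute, capped at 16.
thumbnails_per_second = 60 / 60                      # 1.0
duration, max_thumbnails = 150.0, 16                 # hypothetical 150s video
num_thumbs = int(min(max(1, duration * thumbnails_per_second), max_thumbnails))
timestamps = [duration / (num_thumbs + 1) * i for i in range(1, num_thumbs + 1)]
print(num_thumbs)                                    # 16
print([round(t, 1) for t in timestamps[:3]])         # [8.8, 17.6, 26.5]
```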
|
||||
|
|
|
@ -0,0 +1,136 @@
|
|||
import os
|
||||
from loguru import logger
|
||||
from tsp_client import TSPSigner, SigningSettings, TSPVerifier
|
||||
from tsp_client.algorithms import DigestAlgorithm
|
||||
from importlib.metadata import version
|
||||
from asn1crypto.cms import ContentInfo
|
||||
from certvalidator import CertificateValidator, ValidationContext
|
||||
from asn1crypto import pem
|
||||
import certifi
|
||||
|
||||
from . import Enricher
|
||||
from ..core import Metadata, ArchivingContext, Media
|
||||
from ..archivers import Archiver
|
||||
|
||||
|
||||
class TimestampingEnricher(Enricher):
|
||||
"""
|
||||
Uses several RFC3161 Time Stamp Authorities to generate a timestamp token that will be preserved. This can be used to prove that a file existed at a given time, which is useful for legal purposes, for example to show that it was not tampered with after that date.
|
||||
|
||||
The information that gets timestamped is a newline-separated concatenation of the file hashes existing in the current archive. It will depend on which archivers and enrichers ran before this one. Inner media files (like thumbnails) are not included in the .txt file. It should run AFTER the hash_enricher.
|
||||
|
||||
See https://gist.github.com/Manouchehri/fd754e402d98430243455713efada710 for a list of timestamp authorities.
|
||||
"""
|
||||
name = "timestamping_enricher"
|
||||
|
||||
def __init__(self, config: dict) -> None:
|
||||
super().__init__(config)
|
||||
|
||||
@staticmethod
|
||||
def configs() -> dict:
|
||||
return {
|
||||
"tsa_urls": {
|
||||
"default": [
|
||||
# [Adobe Approved Trust List] and [Windows Cert Store]
|
||||
"http://timestamp.digicert.com",
|
||||
"http://timestamp.identrust.com",
|
||||
# "https://timestamp.entrust.net/TSS/RFC3161sha2TS", # not valid for timestamping
|
||||
# "https://timestamp.sectigo.com", # wait 15 seconds between each request.
|
||||
|
||||
# [Adobe: European Union Trusted Lists].
|
||||
# "https://timestamp.sectigo.com/qualified", # wait 15 seconds between each request.
|
||||
|
||||
# [Windows Cert Store]
|
||||
"http://timestamp.globalsign.com/tsa/r6advanced1",
|
||||
|
||||
# [Adobe: European Union Trusted Lists] and [Windows Cert Store]
|
||||
# "http://ts.quovadisglobal.com/eu", # not valid for timestamping
|
||||
# "http://tsa.belgium.be/connect", # self-signed certificate in certificate chain
|
||||
# "https://timestamp.aped.gov.gr/qtss", # self-signed certificate in certificate chain
|
||||
# "http://tsa.sep.bg", # self-signed certificate in certificate chain
|
||||
# "http://tsa.izenpe.com", #unable to get local issuer certificate
|
||||
# "http://kstamp.keynectis.com/KSign", # unable to get local issuer certificate
|
||||
"http://tss.accv.es:8318/tsa",
|
||||
],
|
||||
"help": "List of RFC3161 Time Stamp Authorities to use, separate with commas if passed via the command line.",
|
||||
"cli_set": lambda cli_val, cur_val: set(cli_val.split(","))
|
||||
}
|
||||
}
|
||||
|
||||
def enrich(self, to_enrich: Metadata) -> None:
|
||||
url = to_enrich.get_url()
|
||||
logger.debug(f"RFC3161 timestamping existing files for {url=}")
|
||||
|
||||
# create a new text file with the existing media hashes
|
||||
hashes = [m.get("hash").replace("SHA-256:", "").replace("SHA3-512:", "") for m in to_enrich.media if m.get("hash")]
|
||||
|
||||
if not len(hashes):
|
||||
logger.warning(f"No hashes found in {url=}")
|
||||
return
|
||||
|
||||
tmp_dir = ArchivingContext.get_tmp_dir()
|
||||
hashes_fn = os.path.join(tmp_dir, "hashes.txt")
|
||||
|
||||
data_to_sign = "\n".join(hashes)
|
||||
with open(hashes_fn, "w") as f:
|
||||
f.write(data_to_sign)
|
||||
hashes_media = Media(filename=hashes_fn)
|
||||
|
||||
timestamp_tokens = []
|
||||
from slugify import slugify
|
||||
for tsa_url in self.tsa_urls:
|
||||
try:
|
||||
signing_settings = SigningSettings(tsp_server=tsa_url, digest_algorithm=DigestAlgorithm.SHA256)
|
||||
signer = TSPSigner()
|
||||
message = bytes(data_to_sign, encoding='utf8')
|
||||
# send TSQ and get TSR from the TSA server
|
||||
signed = signer.sign(message=message, signing_settings=signing_settings)
|
||||
# fail if there's any issue with the certificates, uses certifi list of trusted CAs
|
||||
TSPVerifier(certifi.where()).verify(signed, message=message)
|
||||
# download and verify timestamping certificate
|
||||
cert_chain = self.download_and_verify_certificate(signed)
|
||||
# continue with saving the timestamp token
|
||||
tst_fn = os.path.join(tmp_dir, f"timestamp_token_{slugify(tsa_url)}")
|
||||
with open(tst_fn, "wb") as f: f.write(signed)
|
||||
timestamp_tokens.append(Media(filename=tst_fn).set("tsa", tsa_url).set("cert_chain", cert_chain))
|
||||
except Exception as e:
|
||||
logger.warning(f"Error while timestamping {url=} with {tsa_url=}: {e}")
|
||||
|
||||
if len(timestamp_tokens):
|
||||
hashes_media.set("timestamp_authority_files", timestamp_tokens)
|
||||
hashes_media.set("certifi v", version("certifi"))
|
||||
hashes_media.set("tsp_client v", version("tsp_client"))
|
||||
hashes_media.set("certvalidator v", version("certvalidator"))
|
||||
to_enrich.add_media(hashes_media, id="timestamped_hashes")
|
||||
to_enrich.set("timestamped", True)
|
||||
logger.success(f"{len(timestamp_tokens)} timestamp tokens created for {url=}")
|
||||
else:
|
||||
logger.warning(f"No successful timestamps for {url=}")
|
||||
|
||||
def download_and_verify_certificate(self, signed: bytes) -> list[Media]:
|
||||
# returns the leaf certificate URL, fails if not set
|
||||
tst = ContentInfo.load(signed)
|
||||
|
||||
trust_roots = []
|
||||
with open(certifi.where(), 'rb') as f:
|
||||
for _, _, der_bytes in pem.unarmor(f.read(), multiple=True):
|
||||
trust_roots.append(der_bytes)
|
||||
context = ValidationContext(trust_roots=trust_roots)
|
||||
|
||||
certificates = tst["content"]["certificates"]
|
||||
first_cert = certificates[0].dump()
|
||||
intermediate_certs = []
|
||||
for i in range(1, len(certificates)): # cannot use list comprehension [1:]
|
||||
intermediate_certs.append(certificates[i].dump())
|
||||
|
||||
validator = CertificateValidator(first_cert, intermediate_certs=intermediate_certs, validation_context=context)
|
||||
path = validator.validate_usage({'digital_signature'}, extended_key_usage={'time_stamping'})
|
||||
|
||||
cert_chain = []
|
||||
for cert in path:
|
||||
cert_fn = os.path.join(ArchivingContext.get_tmp_dir(), f"{str(cert.serial_number)[:20]}.crt")
|
||||
with open(cert_fn, "wb") as f:
|
||||
f.write(cert.dump())
|
||||
cert_chain.append(Media(filename=cert_fn).set("subject", cert.subject.native["common_name"]))
|
||||
|
||||
return cert_chain
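A minimal sketch of how a stored timestamp token could later be re-verified against the hashes file, reusing the same `TSPVerifier` call as the enricher (the file names are hypothetical and depend on the TSA URL slug):

```python
import certifi
from tsp_client import TSPVerifier

# Hypothetical artifact names: the hashes file and one token written by the enricher above.
with open("hashes.txt", "rb") as f:
    message = f.read()
with open("timestamp_token_http-timestamp-digicert-com", "rb") as f:
    signed = f.read()

# Same call the enricher uses; it raises if the token or its certificate chain is invalid.
TSPVerifier(certifi.where()).verify(signed, message=message)
print("timestamp token verified against hashes.txt")
```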
|
|
@ -30,8 +30,27 @@ class WaczArchiverEnricher(Enricher, Archiver):
|
|||
"profile": {"default": None, "help": "browsertrix-profile (for profile generation see https://github.com/webrecorder/browsertrix-crawler#creating-and-using-browser-profiles)."},
|
||||
"docker_commands": {"default": None, "help":"if a custom docker invocation is needed"},
|
||||
"timeout": {"default": 120, "help": "timeout for WACZ generation in seconds"},
|
||||
"extract_media": {"default": True, "help": "If enabled all the images/videos/audio present in the WACZ archive will be extracted into separate Media. The .wacz file will be kept untouched."}
|
||||
"extract_media": {"default": False, "help": "If enabled all the images/videos/audio present in the WACZ archive will be extracted into separate Media and appear in the html report. The .wacz file will be kept untouched."},
|
||||
"extract_screenshot": {"default": True, "help": "If enabled the screenshot captured by browsertrix will be extracted into separate Media and appear in the html report. The .wacz file will be kept untouched."},
|
||||
"socks_proxy_host": {"default": None, "help": "SOCKS proxy host for browsertrix-crawler, use in combination with socks_proxy_port. eg: user:password@host"},
|
||||
"socks_proxy_port": {"default": None, "help": "SOCKS proxy port for browsertrix-crawler, use in combination with socks_proxy_host. eg 1234"},
|
||||
}
|
||||
|
||||
def setup(self) -> None:
|
||||
self.use_docker = os.environ.get('WACZ_ENABLE_DOCKER') or not os.environ.get('RUNNING_IN_DOCKER')
|
||||
self.docker_in_docker = os.environ.get('WACZ_ENABLE_DOCKER') and os.environ.get('RUNNING_IN_DOCKER')
|
||||
|
||||
self.cwd_dind = f"/crawls/crawls{random_str(8)}"
|
||||
self.browsertrix_home_host = os.environ.get('BROWSERTRIX_HOME_HOST')
|
||||
self.browsertrix_home_container = os.environ.get('BROWSERTRIX_HOME_CONTAINER') or self.browsertrix_home_host
|
||||
# create the crawls folder if it does not exist, so it can be safely removed in cleanup
|
||||
if self.docker_in_docker:
|
||||
os.makedirs(self.cwd_dind, exist_ok=True)
|
||||
|
||||
def cleanup(self) -> None:
|
||||
if self.docker_in_docker:
|
||||
logger.debug(f"Removing {self.cwd_dind=}")
|
||||
shutil.rmtree(self.cwd_dind, ignore_errors=True)
|
||||
|
||||
def download(self, item: Metadata) -> Metadata:
|
||||
# this new Metadata object is required to avoid duplication
|
||||
|
@ -48,27 +67,30 @@ class WaczArchiverEnricher(Enricher, Archiver):
|
|||
url = to_enrich.get_url()
|
||||
|
||||
collection = random_str(8)
|
||||
browsertrix_home_host = os.environ.get('BROWSERTRIX_HOME_HOST') or os.path.abspath(ArchivingContext.get_tmp_dir())
|
||||
browsertrix_home_container = os.environ.get('BROWSERTRIX_HOME_CONTAINER') or browsertrix_home_host
|
||||
browsertrix_home_host = self.browsertrix_home_host or os.path.abspath(ArchivingContext.get_tmp_dir())
|
||||
browsertrix_home_container = self.browsertrix_home_container or browsertrix_home_host
|
||||
|
||||
cmd = [
|
||||
"crawl",
|
||||
"--url", url,
|
||||
"--scopeType", "page",
|
||||
"--generateWACZ",
|
||||
"--text",
|
||||
"--text", "to-pages",
|
||||
"--screenshot", "fullPage",
|
||||
"--collection", collection,
|
||||
"--id", collection,
|
||||
"--saveState", "never",
|
||||
"--behaviors", "autoscroll,autoplay,autofetch,siteSpecific",
|
||||
"--behaviorTimeout", str(self.timeout),
|
||||
"--timeout", str(self.timeout)]
|
||||
"--timeout", str(self.timeout),
|
||||
"--blockAds" # TODO: test
|
||||
]
|
||||
|
||||
if self.docker_in_docker:
|
||||
cmd.extend(["--cwd", self.cwd_dind])
|
||||
|
||||
# call docker if explicitly enabled or we are running on the host (not in docker)
|
||||
use_docker = os.environ.get('WACZ_ENABLE_DOCKER') or not os.environ.get('RUNNING_IN_DOCKER')
|
||||
|
||||
if use_docker:
|
||||
if self.use_docker:
|
||||
logger.debug(f"generating WACZ in Docker for {url=}")
|
||||
logger.debug(f"{browsertrix_home_host=} {browsertrix_home_container=}")
|
||||
if self.docker_commands:
|
||||
|
@ -90,12 +112,20 @@ class WaczArchiverEnricher(Enricher, Archiver):
|
|||
|
||||
try:
|
||||
logger.info(f"Running browsertrix-crawler: {' '.join(cmd)}")
|
||||
subprocess.run(cmd, check=True)
|
||||
my_env = os.environ.copy()
|
||||
if self.socks_proxy_host and self.socks_proxy_port:
|
||||
logger.debug("Using SOCKS proxy for browsertrix-crawler")
|
||||
my_env["SOCKS_HOST"] = self.socks_proxy_host
|
||||
my_env["SOCKS_PORT"] = str(self.socks_proxy_port)
|
||||
subprocess.run(cmd, check=True, env=my_env)
|
||||
except Exception as e:
|
||||
logger.error(f"WACZ generation failed: {e}")
|
||||
return False
|
||||
|
||||
if use_docker:
|
||||
|
||||
if self.docker_in_docker:
|
||||
wacz_fn = os.path.join(self.cwd_dind, "collections", collection, f"{collection}.wacz")
|
||||
elif self.use_docker:
|
||||
wacz_fn = os.path.join(browsertrix_home_container, "collections", collection, f"{collection}.wacz")
|
||||
else:
|
||||
wacz_fn = os.path.join("collections", collection, f"{collection}.wacz")
|
||||
|
@ -105,10 +135,12 @@ class WaczArchiverEnricher(Enricher, Archiver):
|
|||
return False
|
||||
|
||||
to_enrich.add_media(Media(wacz_fn), "browsertrix")
|
||||
if self.extract_media:
|
||||
if self.extract_media or self.extract_screenshot:
|
||||
self.extract_media_from_wacz(to_enrich, wacz_fn)
|
||||
|
||||
if use_docker:
|
||||
if self.docker_in_docker:
|
||||
jsonl_fn = os.path.join(self.cwd_dind, "collections", collection, "pages", "pages.jsonl")
|
||||
elif self.use_docker:
|
||||
jsonl_fn = os.path.join(browsertrix_home_container, "collections", collection, "pages", "pages.jsonl")
|
||||
else:
|
||||
jsonl_fn = os.path.join("collections", collection, "pages", "pages.jsonl")
|
||||
|
@ -131,7 +163,7 @@ class WaczArchiverEnricher(Enricher, Archiver):
|
|||
"""
|
||||
Receives a .wacz archive, and extracts all relevant media from it, adding them to to_enrich.
|
||||
"""
|
||||
logger.info(f"WACZ extract_media flag is set, extracting media from {wacz_filename=}")
|
||||
logger.info(f"WACZ extract_media or extract_screenshot flag is set, extracting media from {wacz_filename=}")
|
||||
|
||||
# unzipping the .wacz
|
||||
tmp_dir = ArchivingContext.get_tmp_dir()
|
||||
|
@ -152,15 +184,17 @@ class WaczArchiverEnricher(Enricher, Archiver):
|
|||
# get media out of .warc
|
||||
counter = 0
|
||||
seen_urls = set()
|
||||
import json
|
||||
with open(warc_filename, 'rb') as warc_stream:
|
||||
for record in ArchiveIterator(warc_stream):
|
||||
# only include fetched resources
|
||||
if record.rec_type == "resource": # screenshots
|
||||
if record.rec_type == "resource" and record.content_type == "image/png" and self.extract_screenshot: # screenshots
|
||||
fn = os.path.join(tmp_dir, f"warc-file-{counter}.png")
|
||||
with open(fn, "wb") as outf: outf.write(record.raw_stream.read())
|
||||
m = Media(filename=fn)
|
||||
to_enrich.add_media(m, "browsertrix-screenshot")
|
||||
counter += 1
|
||||
if not self.extract_media: continue
|
||||
|
||||
if record.rec_type != 'response': continue
|
||||
record_url = record.rec_headers.get_header('WARC-Target-URI')
|
||||
|
@ -189,7 +223,7 @@ class WaczArchiverEnricher(Enricher, Archiver):
|
|||
# if a link with better quality exists, try to download that
|
||||
if record_url_best_qual != record_url:
|
||||
try:
|
||||
m.filename = self.download_from_url(record_url_best_qual, warc_fn, to_enrich)
|
||||
m.filename = self.download_from_url(record_url_best_qual, warc_fn)
|
||||
m.set("src", record_url_best_qual)
|
||||
m.set("src_alternative", record_url)
|
||||
except Exception as e: logger.warning(f"Unable to download best quality URL for {record_url=} got error {e}, using original in WARC.")
|
||||
|
@ -200,4 +234,4 @@ class WaczArchiverEnricher(Enricher, Archiver):
|
|||
to_enrich.add_media(m, warc_fn)
|
||||
counter += 1
|
||||
seen_urls.add(record_url)
|
||||
logger.info(f"WACZ extract_media finished, found {counter} relevant media file(s)")
|
||||
logger.info(f"WACZ extract_media/extract_screenshot finished, found {counter} relevant media file(s)")
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
from loguru import logger
|
||||
import time, requests
|
||||
|
||||
|
||||
from . import Enricher
|
||||
from ..archivers import Archiver
|
||||
from ..utils import UrlUtil
|
||||
|
@ -9,7 +8,9 @@ from ..core import Metadata
|
|||
|
||||
class WaybackArchiverEnricher(Enricher, Archiver):
|
||||
"""
|
||||
Submits the current URL to the webarchive and returns a job_id or completed archive
|
||||
Submits the current URL to the webarchive and returns a job_id or completed archive.
|
||||
|
||||
The Wayback Machine will rate-limit heavy usage per IP.
|
||||
"""
|
||||
name = "wayback_archiver_enricher"
|
||||
|
||||
|
@ -25,7 +26,9 @@ class WaybackArchiverEnricher(Enricher, Archiver):
|
|||
"timeout": {"default": 15, "help": "seconds to wait for successful archive confirmation from wayback, if more than this passes the result contains the job_id so the status can later be checked manually."},
|
||||
"if_not_archived_within": {"default": None, "help": "only tell wayback to archive if no archive is available before the number of seconds specified, use None to ignore this option. For more information: https://docs.google.com/document/d/1Nsv52MvSjbLb2PCpHlat0gkzw0EvtSgpKHu4mk0MnrA"},
|
||||
"key": {"default": None, "help": "wayback API key. to get credentials visit https://archive.org/account/s3.php"},
|
||||
"secret": {"default": None, "help": "wayback API secret. to get credentials visit https://archive.org/account/s3.php"}
|
||||
"secret": {"default": None, "help": "wayback API secret. to get credentials visit https://archive.org/account/s3.php"},
|
||||
"proxy_http": {"default": None, "help": "http proxy to use for wayback requests, eg http://proxy-user:password@proxy-ip:port"},
|
||||
"proxy_https": {"default": None, "help": "https proxy to use for wayback requests, eg https://proxy-user:password@proxy-ip:port"},
|
||||
}
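As the `timeout` help above notes, a pending `job_id` can be checked manually later. A rough sketch of such a check against the same status endpoint the enricher polls (the job_id is a placeholder, and the Authorization header format is an assumption about archive.org's S3-style keys):

```python
import requests

job_id = "spn2-0123456789abcdef"   # placeholder: taken from a previous enrich run
headers = {
    "Accept": "application/json",
    # assumption: archive.org S3-style credentials, same key/secret as configured above
    "Authorization": "LOW mykey:mysecret",
}
r = requests.get(f"https://web.archive.org/save/status/{job_id}", headers=headers)
r.raise_for_status()
data = r.json()
if data.get("status") == "success":
    print(f"https://web.archive.org/web/{data['timestamp']}/{data['original_url']}")
else:
    print(data.get("status"))      # e.g. "pending", or an error payload
```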
|
||||
|
||||
def download(self, item: Metadata) -> Metadata:
|
||||
|
@ -36,6 +39,10 @@ class WaybackArchiverEnricher(Enricher, Archiver):
|
|||
return result.success("wayback")
|
||||
|
||||
def enrich(self, to_enrich: Metadata) -> bool:
|
||||
proxies = {}
|
||||
if self.proxy_http: proxies["http"] = self.proxy_http
|
||||
if self.proxy_https: proxies["https"] = self.proxy_https
|
||||
|
||||
url = to_enrich.get_url()
|
||||
if UrlUtil.is_auth_wall(url):
|
||||
logger.debug(f"[SKIP] WAYBACK since url is behind AUTH WALL: {url=}")
|
||||
|
@ -55,7 +62,7 @@ class WaybackArchiverEnricher(Enricher, Archiver):
|
|||
if self.if_not_archived_within:
|
||||
post_data["if_not_archived_within"] = self.if_not_archived_within
|
||||
# see https://docs.google.com/document/d/1Nsv52MvSjbLb2PCpHlat0gkzw0EvtSgpKHu4mk0MnrA for more options
|
||||
r = requests.post('https://web.archive.org/save/', headers=ia_headers, data=post_data)
|
||||
r = requests.post('https://web.archive.org/save/', headers=ia_headers, data=post_data, proxies=proxies)
|
||||
|
||||
if r.status_code != 200:
|
||||
logger.error(em := f"Internet archive failed with status of {r.status_code}: {r.json()}")
|
||||
|
@ -75,14 +82,16 @@ class WaybackArchiverEnricher(Enricher, Archiver):
|
|||
while not wayback_url and time.time() - start_time <= self.timeout:
|
||||
try:
|
||||
logger.debug(f"GETting status for {job_id=} on {url=} ({attempt=})")
|
||||
r_status = requests.get(f'https://web.archive.org/save/status/{job_id}', headers=ia_headers)
|
||||
r_status = requests.get(f'https://web.archive.org/save/status/{job_id}', headers=ia_headers, proxies=proxies)
|
||||
r_json = r_status.json()
|
||||
if r_status.status_code == 200 and r_json['status'] == 'success':
|
||||
wayback_url = f"https://web.archive.org/web/{r_json['timestamp']}/{r_json['original_url']}"
|
||||
elif r_status.status_code != 200 or r_json['status'] != 'pending':
|
||||
logger.error(f"Wayback failed with {r_json}")
|
||||
return False
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.warning(f"RequestException: fetching status for {url=} due to: {e}")
|
||||
break
|
||||
except Exception as e:
|
||||
logger.warning(f"error fetching status for {url=} due to: {e}")
|
||||
if not wayback_url:
|
||||
|
|
|
@ -10,7 +10,7 @@ from ..storages import S3Storage
|
|||
class WhisperEnricher(Enricher):
|
||||
"""
|
||||
Connects with a Whisper API service to get texts out of audio
|
||||
whisper API repository: TODO
|
||||
whisper API repository: https://github.com/bellingcat/whisperbox-transcribe/
|
||||
Only works if an S3 compatible storage is used
|
||||
"""
|
||||
name = "whisper_enricher"
|
||||
|
@ -44,7 +44,7 @@ class WhisperEnricher(Enricher):
|
|||
job_results = {}
|
||||
for i, m in enumerate(to_enrich.media):
|
||||
if m.is_video() or m.is_audio():
|
||||
m.store(url=url)
|
||||
m.store(url=url, metadata=to_enrich)
|
||||
try:
|
||||
job_id = self.submit_job(m)
|
||||
job_results[job_id] = False
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
from .feeder import Feeder
|
||||
from .gsheet_feeder import GsheetsFeeder
|
||||
from .cli_feeder import CLIFeeder
|
||||
from .cli_feeder import CLIFeeder
|
||||
from .atlos_feeder import AtlosFeeder
|
|
@ -0,0 +1,56 @@
|
|||
from loguru import logger
|
||||
import requests
|
||||
|
||||
from . import Feeder
|
||||
from ..core import Metadata, ArchivingContext
|
||||
from ..utils import get_atlos_config_options
|
||||
|
||||
|
||||
class AtlosFeeder(Feeder):
|
||||
name = "atlos_feeder"
|
||||
|
||||
def __init__(self, config: dict) -> None:
|
||||
# without this STEP.__init__ is not called
|
||||
super().__init__(config)
|
||||
if type(self.api_token) != str:
|
||||
raise Exception("Atlos Feeder did not receive an Atlos API token")
|
||||
|
||||
@staticmethod
|
||||
def configs() -> dict:
|
||||
return get_atlos_config_options()
|
||||
|
||||
def __iter__(self) -> Metadata:
|
||||
# Get all the urls from the Atlos API
|
||||
count = 0
|
||||
cursor = None
|
||||
while True:
|
||||
response = requests.get(
|
||||
f"{self.atlos_url}/api/v2/source_material",
|
||||
headers={"Authorization": f"Bearer {self.api_token}"},
|
||||
params={"cursor": cursor},
|
||||
)
|
||||
data = response.json()
|
||||
response.raise_for_status()
|
||||
cursor = data["next"]
|
||||
|
||||
for item in data["results"]:
|
||||
if (
|
||||
item["source_url"] not in [None, ""]
|
||||
and (
|
||||
item["metadata"]
|
||||
.get("auto_archiver", {})
|
||||
.get("processed", False)
|
||||
!= True
|
||||
)
|
||||
and item["visibility"] == "visible"
|
||||
and item["status"] not in ["processing", "pending"]
|
||||
):
|
||||
yield Metadata().set_url(item["source_url"]).set(
|
||||
"atlos_id", item["id"]
|
||||
)
|
||||
count += 1
|
||||
|
||||
if len(data["results"]) == 0 or cursor is None:
|
||||
break
|
||||
|
||||
logger.success(f"Processed {count} URL(s)")
|
|
@ -4,6 +4,8 @@ import mimetypes, os, pathlib
|
|||
from jinja2 import Environment, FileSystemLoader
|
||||
from urllib.parse import quote
|
||||
from loguru import logger
|
||||
import minify_html, json
|
||||
import base64
|
||||
|
||||
from ..version import __version__
|
||||
from ..core import Metadata, Media, ArchivingContext
|
||||
|
@ -19,7 +21,7 @@ class HtmlFormatter(Formatter):
|
|||
def __init__(self, config: dict) -> None:
|
||||
# without this STEP.__init__ is not called
|
||||
super().__init__(config)
|
||||
self.environment = Environment(loader=FileSystemLoader(os.path.join(pathlib.Path(__file__).parent.resolve(), "templates/")))
|
||||
self.environment = Environment(loader=FileSystemLoader(os.path.join(pathlib.Path(__file__).parent.resolve(), "templates/")), autoescape=True)
|
||||
# JinjaHelper class static methods are added as filters
|
||||
self.environment.filters.update({
|
||||
k: v.__func__ for k, v in JinjaHelpers.__dict__.items() if isinstance(v, staticmethod)
|
||||
|
@ -45,6 +47,8 @@ class HtmlFormatter(Formatter):
|
|||
metadata=item.metadata,
|
||||
version=__version__
|
||||
)
|
||||
content = minify_html.minify(content, minify_js=False, minify_css=True)
|
||||
|
||||
html_path = os.path.join(ArchivingContext.get_tmp_dir(), f"formatted{random_str(24)}.html")
|
||||
with open(html_path, mode="w", encoding="utf-8") as outf:
|
||||
outf.write(content)
|
||||
|
@ -89,3 +93,8 @@ class JinjaHelpers:
|
|||
@staticmethod
|
||||
def quote(s: str) -> str:
|
||||
return quote(s)
|
||||
|
||||
@staticmethod
|
||||
def json_dump_b64(d: dict) -> str:
|
||||
j = json.dumps(d, indent=4, default=str)
|
||||
return base64.b64encode(j.encode()).decode()
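A small round-trip illustration of what `json_dump_b64` produces and what the template's copy handler recovers with `atob` (the dictionary is made up):

```python
import base64, json

d = {"url": "https://example.com", "status": "success"}   # hypothetical metadata mapping
b64 = base64.b64encode(json.dumps(d, indent=4, default=str).encode()).decode()  # what json_dump_b64 returns
restored = json.loads(base64.b64decode(b64).decode())      # what the copy handler recovers via atob
assert restored == d
```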
|
||||
|
|
|
@ -96,12 +96,22 @@
|
|||
overflow: hidden;
|
||||
background-color: #f1f1f1;
|
||||
}
|
||||
|
||||
.pem-certificate, .text-preview {
|
||||
text-align: left;
|
||||
font-size: small;
|
||||
}
|
||||
.text-preview{
|
||||
padding-left: 10px;
|
||||
padding-right: 10px;
|
||||
white-space: pre-wrap;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div id="notification"></div>
|
||||
<h2>Archived media for <a href="{{ url }}">{{ url }}</a></h2>
|
||||
<h2>Archived media for <span class="copy">{{ url }}</span> - <a href="{{ url }}">open</a></h2>
|
||||
{% if title | string | length > 0 %}
|
||||
<p><b>title:</b> '<span class="copy">{{ title }}</span>'</p>
|
||||
{% endif %}
|
||||
|
@ -115,48 +125,13 @@
|
|||
<table class="content">
|
||||
<tr>
|
||||
<th>about</th>
|
||||
<th>preview(s)</th>
|
||||
<th>files and preview</th>
|
||||
</tr>
|
||||
<tbody>
|
||||
{% for m in media %}
|
||||
<tr>
|
||||
<td>
|
||||
<ul>
|
||||
<li><b>key:</b> <span class="copy">{{ m.key }}</span></li>
|
||||
<li><b>type:</b> <span class="copy">{{ m.mimetype }}</span></li>
|
||||
|
||||
{% for prop in m.properties %}
|
||||
|
||||
{% if m.properties[prop] | is_list %}
|
||||
<p></p>
|
||||
<div>
|
||||
<b class="collapsible" title="expand">{{ prop }}:</b>
|
||||
<p></p>
|
||||
<div class="collapsible-content">
|
||||
{% for subprop in m.properties[prop] %}
|
||||
{% if subprop | is_media %}
|
||||
{{ macros.display_media(subprop, true, url) }}
|
||||
|
||||
<ul>
|
||||
{% for subprop_prop in subprop.properties %}
|
||||
<li><b>{{ subprop_prop }}:</b>
|
||||
{{ macros.copy_urlize(subprop.properties[subprop_prop]) }}</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
|
||||
{% else %}
|
||||
{{ subprop }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</div>
|
||||
</div>
|
||||
<p></p>
|
||||
{% elif m.properties[prop] | string | length > 1 %}
|
||||
<li><b>{{ prop }}:</b> {{ macros.copy_urlize(m.properties[prop]) }}</li>
|
||||
{% endif %}
|
||||
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{{ macros.display_recursive(m, true) }}
|
||||
</td>
|
||||
<td>
|
||||
{{ macros.display_media(m, true, url) }}
|
||||
|
@ -175,16 +150,77 @@
|
|||
<tr>
|
||||
<td>{{ key }}</td>
|
||||
<td>
|
||||
{% if metadata[key] is mapping %}
|
||||
<div class="center copy" copy-value64='{{metadata[key] | json_dump_b64}}'>Copy as JSON</div>
|
||||
{% endif %}
|
||||
{{ macros.copy_urlize(metadata[key]) }}
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
|
||||
<p style="text-align:center;">Made with <a
|
||||
href="https://github.com/bellingcat/auto-archiver">bellingcat/auto-archiver</a> v{{ version }}</p>
|
||||
<p class="center">Made with <a href="https://github.com/bellingcat/auto-archiver">bellingcat/auto-archiver</a>
|
||||
v{{ version }}</p>
|
||||
</body>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/forge/0.10.0/forge.min.js"></script>
|
||||
<script defer>
|
||||
// partial decode of SSL certificates
|
||||
function decodeCertificate(sslCert) {
|
||||
var cert = forge.pki.certificateFromPem(sslCert);
|
||||
return `SSL CERTIFICATE PREVIEW:<br/><ul>
|
||||
<li><b>Subject:</b> <span class="copy">${cert.subject.attributes.map(attr => `${attr.shortName}: ${attr.value}`).join(", ")}</span></li>
|
||||
<li><b>Issuer:</b> <span class="copy">${cert.issuer.attributes.map(attr => `${attr.shortName}: ${attr.value}`).join(", ")}</span></li>
|
||||
<li><b>Valid From:</b> <span class="copy">${cert.validity.notBefore}</span></li>
|
||||
<li><b>Valid To:</b> <span class="copy">${cert.validity.notAfter}</span></li>
|
||||
<li><b>Serial Number:</b> <span class="copy">${cert.serialNumber}</span></li>
|
||||
</ul>`;
|
||||
}
|
||||
|
||||
async function run() {
|
||||
let setupFunctions = [
|
||||
previewCertificates,
|
||||
previewText,
|
||||
enableCopyLogic,
|
||||
enableCollapsibleLogic,
|
||||
setupSafeView
|
||||
];
|
||||
setupFunctions.forEach(async f => {
|
||||
try {
|
||||
await f();
|
||||
} catch (e) {
|
||||
console.error(`Error in ${f.name}: ${e}`);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async function previewCertificates() {
|
||||
await Promise.all(
|
||||
Array.from(document.querySelectorAll(".pem-certificate")).map(async el => {
|
||||
let certificate = await (await fetch(el.getAttribute("pem"))).text();
|
||||
el.innerHTML = decodeCertificate(certificate);
|
||||
|
||||
let cyberChefUrl =
|
||||
`https://gchq.github.io/CyberChef/#recipe=Parse_X.509_certificate('PEM')&input=${btoa(certificate)}`;
|
||||
// create a new anchor with this url and append after the code
|
||||
let a = document.createElement("a");
|
||||
a.href = cyberChefUrl;
|
||||
a.textContent = "Full certificate details";
|
||||
el.parentElement.appendChild(a);
|
||||
})
|
||||
);
|
||||
console.log("certificate preview done");
|
||||
}
|
||||
|
||||
async function previewText() {
|
||||
await Promise.all(
|
||||
Array.from(document.querySelectorAll(".text-preview")).map(async el => {
|
||||
let textContent = await (await fetch(el.getAttribute("url"))).text();
|
||||
el.textContent = textContent;
|
||||
})
|
||||
);
|
||||
console.log("text preview done");
|
||||
}
|
||||
|
||||
// notification logic
|
||||
const notification = document.getElementById("notification");
|
||||
|
||||
|
@ -198,83 +234,99 @@
|
|||
}
|
||||
|
||||
// copy logic
|
||||
Array.from(document.querySelectorAll(".copy")).forEach(el => {
|
||||
el.onclick = () => {
|
||||
document.execCommand("copy");
|
||||
}
|
||||
el.addEventListener("copy", (e) => {
|
||||
e.preventDefault();
|
||||
if (e.clipboardData) {
|
||||
if (el.hasAttribute("copy-value")) {
|
||||
e.clipboardData.setData("text/plain", el.getAttribute("copy-value"));
|
||||
} else {
|
||||
e.clipboardData.setData("text/plain", el.textContent);
|
||||
async function enableCopyLogic() {
|
||||
await Promise.all(
|
||||
Array.from(document.querySelectorAll(".copy")).map(el => {
|
||||
el.onclick = () => {
|
||||
document.execCommand("copy");
|
||||
}
|
||||
console.log(e.clipboardData.getData("text"))
|
||||
showNotification("copied!")
|
||||
}
|
||||
})
|
||||
})
|
||||
el.addEventListener("copy", (e) => {
|
||||
e.preventDefault();
|
||||
if (e.clipboardData) {
|
||||
if (el.hasAttribute("copy-value")) {
|
||||
e.clipboardData.setData("text/plain", el.getAttribute("copy-value"));
|
||||
} else if (el.hasAttribute("copy-value64")) {
|
||||
// TODO: figure out how to decode unicode chars into utf-8
|
||||
e.clipboardData.setData("text/plain", new String(atob(el.getAttribute(
|
||||
"copy-value64"))));
|
||||
} else {
|
||||
e.clipboardData.setData("text/plain", el.textContent);
|
||||
}
|
||||
console.log(e.clipboardData.getData("text"))
|
||||
showNotification("copied!")
|
||||
}
|
||||
})
|
||||
})
|
||||
)
|
||||
console.log("copy logic enabled");
|
||||
}
|
||||
|
||||
// collapsibles
|
||||
let coll = document.getElementsByClassName("collapsible");
|
||||
let i;
|
||||
|
||||
for (i = 0; i < coll.length; i++) {
|
||||
coll[i].addEventListener("click", function () {
|
||||
this.classList.toggle("active");
|
||||
// let content = this.nextElementSibling;
|
||||
let content = this.parentElement.querySelector(".collapsible-content");
|
||||
if (content.style.display === "block") {
|
||||
content.style.display = "none";
|
||||
} else {
|
||||
content.style.display = "block";
|
||||
}
|
||||
});
|
||||
async function enableCollapsibleLogic() {
|
||||
let coll = document.getElementsByClassName("collapsible");
|
||||
for (let i = 0; i < coll.length; i++) {
|
||||
await new Promise(resolve => {
|
||||
coll[i].addEventListener("click", function () {
|
||||
this.classList.toggle("active");
|
||||
// let content = this.nextElementSibling;
|
||||
let content = this.parentElement.querySelector(".collapsible-content");
|
||||
if (content.style.display === "block") {
|
||||
content.style.display = "none";
|
||||
} else {
|
||||
content.style.display = "block";
|
||||
}
|
||||
});
|
||||
resolve();
|
||||
})
|
||||
}
|
||||
console.log("collapsible logic enabled");
|
||||
}
|
||||
|
||||
// logic for enabled/disabled greyscale
|
||||
// Get references to the checkboxes and images/videos
|
||||
const safeImageViewCheckbox = document.getElementById('safe-media-view');
|
||||
const imagesVideos = document.querySelectorAll('img, video');
|
||||
async function setupSafeView() {
|
||||
// logic for enabled/disabled greyscale
|
||||
// Get references to the checkboxes and images/videos
|
||||
const safeImageViewCheckbox = document.getElementById('safe-media-view');
|
||||
const imagesVideos = document.querySelectorAll('img, video');
|
||||
|
||||
// Function to toggle grayscale effect
|
||||
function toggleGrayscale() {
|
||||
// Function to toggle grayscale effect
|
||||
function toggleGrayscale() {
|
||||
imagesVideos.forEach(element => {
|
||||
if (safeImageViewCheckbox.checked) {
|
||||
// Enable grayscale effect
|
||||
element.style.filter = 'grayscale(1)';
|
||||
element.style.webkitFilter = 'grayscale(1)';
|
||||
} else {
|
||||
// Disable grayscale effect
|
||||
element.style.filter = 'none';
|
||||
element.style.webkitFilter = 'none';
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Add event listener to the checkbox to trigger the toggleGrayscale function
|
||||
safeImageViewCheckbox.addEventListener('change', toggleGrayscale);
|
||||
|
||||
// Handle the hover effect using JavaScript
|
||||
imagesVideos.forEach(element => {
|
||||
if (safeImageViewCheckbox.checked) {
|
||||
// Enable grayscale effect
|
||||
element.style.filter = 'grayscale(1)';
|
||||
element.style.webkitFilter = 'grayscale(1)';
|
||||
} else {
|
||||
// Disable grayscale effect
|
||||
element.addEventListener('mouseenter', () => {
|
||||
// Disable grayscale effect on hover
|
||||
element.style.filter = 'none';
|
||||
element.style.webkitFilter = 'none';
|
||||
}
|
||||
});
|
||||
|
||||
element.addEventListener('mouseleave', () => {
|
||||
// Re-enable grayscale effect if checkbox is checked
|
||||
if (safeImageViewCheckbox.checked) {
|
||||
element.style.filter = 'grayscale(1)';
|
||||
element.style.webkitFilter = 'grayscale(1)';
|
||||
}
|
||||
});
|
||||
});
|
||||
toggleGrayscale();
|
||||
console.log("grayscale logic enabled");
|
||||
}
|
||||
|
||||
// Add event listener to the checkbox to trigger the toggleGrayscale function
|
||||
safeImageViewCheckbox.addEventListener('change', toggleGrayscale);
|
||||
|
||||
// Handle the hover effect using JavaScript
|
||||
imagesVideos.forEach(element => {
|
||||
element.addEventListener('mouseenter', () => {
|
||||
// Disable grayscale effect on hover
|
||||
element.style.filter = 'none';
|
||||
element.style.webkitFilter = 'none';
|
||||
});
|
||||
|
||||
element.addEventListener('mouseleave', () => {
|
||||
// Re-enable grayscale effect if checkbox is checked
|
||||
if (safeImageViewCheckbox.checked) {
|
||||
element.style.filter = 'grayscale(1)';
|
||||
element.style.webkitFilter = 'grayscale(1)';
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Call the function on page load to apply the initial state
|
||||
toggleGrayscale();
|
||||
run();
|
||||
</script>
|
||||
|
||||
</html>
|
|
@ -18,6 +18,12 @@ No URL available for {{ m.key }}.
|
|||
<a href="https://www.bing.com/images/search?view=detailv2&iss=sbi&form=SBIVSP&sbisrc=UrlPaste&q=imgurl:{{ url | quote }}">Bing</a>,
|
||||
<a href="https://www.tineye.com/search/?url={{ url | quote }}">Tineye</a>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
Image Forensics:
|
||||
<a href="https://fotoforensics.com/?url={{ url | quote }}">FotoForensics</a>,
|
||||
<a href="https://mever.iti.gr/forensics/?image={{ url }}">Media Verification Assistant</a>
|
||||
</div>
|
||||
<p></p>
|
||||
</div>
|
||||
{% elif 'video' in m.mimetype %}
|
||||
|
@ -35,8 +41,15 @@ No URL available for {{ m.key }}.
|
|||
</div>
|
||||
{% elif m.filename | get_extension == ".wacz" %}
|
||||
<a href="https://replayweb.page/?source={{ url | quote }}#view=pages&url={{ main_url }}">replayweb</a>
|
||||
|
||||
{% elif m.filename | get_extension == ".pem" %}
|
||||
<code class="pem-certificate" pem="{{url}}"></code>
|
||||
|
||||
{% elif 'text' in m.mimetype %}
|
||||
<div>PREVIEW:<br/><code><pre class="text-preview" url="{{url}}"></pre></code></div>
|
||||
|
||||
{% else %}
|
||||
No preview available for {{ m.key }}.
|
||||
No preview available for <code>{{ m.key }}</code>.
|
||||
{% endif %}
|
||||
{% else %}
|
||||
{{ m.url | urlize }}
|
||||
|
@ -54,7 +67,12 @@ No preview available for {{ m.key }}.
|
|||
|
||||
{% macro copy_urlize(val, href_text) -%}
|
||||
|
||||
{% if val is mapping %}
|
||||
{% if val | is_list %}
|
||||
{% for item in val %}
|
||||
{{ copy_urlize(item) }}
|
||||
{% endfor %}
|
||||
|
||||
{% elif val is mapping %}
|
||||
<ul>
|
||||
{% for key in val %}
|
||||
<li>
|
||||
|
@ -64,11 +82,66 @@ No preview available for {{ m.key }}.
|
|||
</ul>
|
||||
|
||||
{% else %}
|
||||
{% if href_text | length == 0 %}
|
||||
{% if href_text | length == 0 %}
|
||||
<span class="copy">{{ val | string | urlize }}</span>
|
||||
{% else %}
|
||||
<span class="copy" copy-value="{{val}}">{{ href_text | string | urlize }}</span>
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
{%- endmacro -%}
|
||||
|
||||
|
||||
{% macro display_recursive(prop, skip_display) -%}
|
||||
{% if prop is mapping %}
|
||||
<div class="center copy" copy-value64='{{prop | json_dump_b64}}'>Copy as JSON</div>
|
||||
<ul>
|
||||
{% for subprop in prop %}
|
||||
<li>
|
||||
<b>{{ subprop }}:</b>
|
||||
{{ display_recursive(prop[subprop]) }}
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
|
||||
{% elif prop | is_list %}
|
||||
{% for item in prop %}
|
||||
<li>
|
||||
{{ display_recursive(item) }}
|
||||
</li>
|
||||
{% endfor %}
|
||||
|
||||
|
||||
{% elif prop | is_media %}
|
||||
{% if not skip_display %}
|
||||
{{ display_media(prop, true) }}
|
||||
{% endif %}
|
||||
<ul>
|
||||
<li><b>key:</b> <span class="copy">{{ prop.key }}</span></li>
|
||||
<li><b>type:</b> <span class="copy">{{ prop.mimetype }}</span></li>
|
||||
{% for subprop in prop.properties %}
|
||||
|
||||
|
||||
{% if prop.properties[subprop] | is_list %}
|
||||
<p></p>
|
||||
<div>
|
||||
<b class="collapsible" title="expand">{{ subprop }} ({{ prop.properties[subprop] | length }}):</b>
|
||||
<p></p>
|
||||
<div class="collapsible-content">
|
||||
{% for subsubprop in prop.properties[subprop] %}
|
||||
{{ display_recursive(subsubprop) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
</div>
|
||||
<p></p>
|
||||
{% elif prop.properties[subprop] | string | length > 1 %}
|
||||
<li><b>{{ subprop }}:</b> {{ copy_urlize(prop.properties[subprop]) }}</li>
|
||||
{% endif %}
|
||||
|
||||
{% endfor %}
|
||||
|
||||
</ul>
|
||||
{% else %}
|
||||
{{ copy_urlize(prop) }}
|
||||
{% endif %}
|
||||
{%- endmacro -%}
|
|
@ -1,4 +1,5 @@
|
|||
from .storage import Storage
|
||||
from .s3 import S3Storage
|
||||
from .local import LocalStorage
|
||||
from .gd import GDriveStorage
|
||||
from .gd import GDriveStorage
|
||||
from .atlos import AtlosStorage
|
|
@ -0,0 +1,74 @@
import os
from typing import IO, List, Optional
from loguru import logger
import requests
import hashlib

from ..core import Media, Metadata
from ..storages import Storage
from ..utils import get_atlos_config_options


class AtlosStorage(Storage):
    name = "atlos_storage"

    def __init__(self, config: dict) -> None:
        super().__init__(config)

    @staticmethod
    def configs() -> dict:
        return dict(Storage.configs(), **get_atlos_config_options())

    def get_cdn_url(self, _media: Media) -> str:
        # It's not always possible to provide an exact URL, because it's
        # possible that the media once uploaded could have been copied to
        # another project.
        return self.atlos_url

    def _hash(self, media: Media) -> str:
        # Hash the media file using sha-256. We don't use the existing auto archiver
        # hash because there's no guarantee that the configurer is using sha-256, which
        # is how Atlos hashes files.

        sha256 = hashlib.sha256()
        with open(media.filename, "rb") as f:
            while True:
                buf = f.read(4096)
                if not buf: break
                sha256.update(buf)
        return sha256.hexdigest()

    def upload(self, media: Media, metadata: Optional[Metadata]=None, **_kwargs) -> bool:
        atlos_id = metadata.get("atlos_id")
        if atlos_id is None:
            logger.error(f"No Atlos ID found in metadata; can't store {media.filename} on Atlos")
            return False

        media_hash = self._hash(media)

        # Check whether the media has already been uploaded
        source_material = requests.get(
            f"{self.atlos_url}/api/v2/source_material/{atlos_id}",
            headers={"Authorization": f"Bearer {self.api_token}"},
        ).json()["result"]
        existing_media = [x["file_hash_sha256"] for x in source_material.get("artifacts", [])]
        if media_hash in existing_media:
            logger.info(f"{media.filename} with SHA256 {media_hash} already uploaded to Atlos")
            return True

        # Upload the media to the Atlos API
        requests.post(
            f"{self.atlos_url}/api/v2/source_material/upload/{atlos_id}",
            headers={"Authorization": f"Bearer {self.api_token}"},
            params={
                "title": media.properties
            },
            files={"file": (os.path.basename(media.filename), open(media.filename, "rb"))},
        ).raise_for_status()

        logger.info(f"Uploaded {media.filename} to Atlos with ID {atlos_id} and title {media.key}")

        return True

    # must be implemented even if unused
    def uploadf(self, file: IO[bytes], key: str, **kwargs: dict) -> bool: pass
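A minimal standalone sketch of the deduplication idea used in upload() above: hash the file in small chunks and skip the upload when the digest is already among the source material's artifact hashes. The file path and the set of known hashes below are illustrative, not taken from the diff:

import hashlib

def sha256_of_file(path: str, chunk_size: int = 4096) -> str:
    # chunked hashing keeps memory flat even for large video files
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# hypothetical values for illustration
already_uploaded = {"9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"}
if sha256_of_file("archived/example.mp4") in already_uploaded:
    print("skipping upload: identical file already on Atlos")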
@ -52,7 +52,7 @@ class GDriveStorage(Storage):
            else:
                logger.debug('GD OAuth Token valid')
        else:
            gd_service_account = config.service_account
            gd_service_account = self.service_account
            logger.debug(f'Using GD Service Account {gd_service_account}')
            creds = service_account.Credentials.from_service_account_file(gd_service_account, scopes=SCOPES)
@ -87,15 +87,6 @@ class GDriveStorage(Storage):
        file_id = self._get_id_from_parent_and_name(folder_id, filename)
        return f"https://drive.google.com/file/d/{file_id}/view?usp=sharing"

    def upload(self, media: Media, **kwargs) -> bool:
        # override parent so that we can use shutil.copy2 and keep metadata
        dest = os.path.join(self.save_to, media.key)
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        logger.debug(f'[{self.__class__.name}] storing file {media.filename} with key {media.key} to {dest}')
        res = shutil.copy2(media.filename, dest)
        logger.info(res)
        return True

    def upload(self, media: Media, **kwargs) -> bool:
        logger.debug(f'[{self.__class__.name}] storing file {media.filename} with key {media.key}')
        """
@ -52,20 +52,6 @@ class S3Storage(Storage):
    def uploadf(self, file: IO[bytes], media: Media, **kwargs: dict) -> None:
        if not self.is_upload_needed(media): return True

        if self.random_no_duplicate:
            # checks if a folder with the hash already exists, if so it skips the upload
            he = HashEnricher({"hash_enricher": {"algorithm": "SHA-256", "chunksize": 1.6e7}})
            hd = he.calculate_hash(media.filename)
            path = os.path.join(NO_DUPLICATES_FOLDER, hd[:24])

            if existing_key:=self.file_in_folder(path):
                media.key = existing_key
                logger.debug(f"skipping upload of {media.filename} because it already exists in {media.key}")
                return True

            _, ext = os.path.splitext(media.key)
            media.key = os.path.join(path, f"{random_str(24)}{ext}")

        extra_args = kwargs.get("extra_args", {})
        if not self.private and 'ACL' not in extra_args:
            extra_args['ACL'] = 'public-read'
@ -89,6 +75,7 @@ class S3Storage(Storage):

        if existing_key:=self.file_in_folder(path):
            media.key = existing_key
            media.set("previously archived", True)
            logger.debug(f"skipping upload of {media.filename} because it already exists in {media.key}")
            return False
@ -1,12 +1,12 @@
from __future__ import annotations
from abc import abstractmethod
from dataclasses import dataclass
from typing import IO
from typing import IO, Optional
import os

from ..utils.misc import random_str

from ..core import Media, Step, ArchivingContext
from ..core import Media, Step, ArchivingContext, Metadata
from ..enrichers import HashEnricher
from loguru import logger
from slugify import slugify
@ -43,12 +43,12 @@ class Storage(Step):
        # only for typing...
        return Step.init(name, config, Storage)

    def store(self, media: Media, url: str) -> None:
    def store(self, media: Media, url: str, metadata: Optional[Metadata]=None) -> None:
        if media.is_stored():
            logger.debug(f"{media.key} already stored, skipping")
            return
        self.set_key(media, url)
        self.upload(media)
        self.upload(media, metadata=metadata)
        media.add_url(self.get_cdn_url(media))

    @abstractmethod
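With store() now threading an optional Metadata object into upload(), a backend that needs per-item context (as AtlosStorage does with atlos_id) can read it from the keyword argument, while backends that ignore it keep working unchanged. A hedged sketch of such a subclass, assuming the package layout and base class shown in this diff; the class and behaviour are illustrative only:

from typing import IO, Optional

from ..core import Media, Metadata
from ..storages import Storage

class ExampleStorage(Storage):  # hypothetical backend, for illustration only
    name = "example_storage"

    def get_cdn_url(self, media: Media) -> str:
        return f"https://example.invalid/{media.key}"

    def upload(self, media: Media, metadata: Optional[Metadata] = None, **kwargs) -> bool:
        # metadata may be None for callers that still use the old two-argument store()
        item_id = metadata.get("atlos_id") if metadata else None
        return item_id is not None

    # must be implemented even if unused
    def uploadf(self, file: IO[bytes], key: str, **kwargs: dict) -> bool: pass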
@ -3,4 +3,5 @@ from .gworksheet import GWorksheet
from .misc import *
from .webdriver import Webdriver
from .gsheet import Gsheets
from .url import UrlUtil
from .atlos import get_atlos_config_options
@ -0,0 +1,13 @@
def get_atlos_config_options():
    return {
        "api_token": {
            "default": None,
            "help": "An Atlos API token. For more information, see https://docs.atlos.org/technical/api/",
            "cli_set": lambda cli_val, _: cli_val
        },
        "atlos_url": {
            "default": "https://platform.atlos.org",
            "help": "The URL of your Atlos instance (e.g., https://platform.atlos.org), without a trailing slash.",
            "cli_set": lambda cli_val, _: cli_val
        },
    }
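These shared options are meant to be merged into each Atlos-aware step's own configuration, which is what AtlosStorage.configs() does above. Roughly, under the assumption that the package import path is auto_archiver.utils and with an extra option that is illustrative only:

from auto_archiver.utils import get_atlos_config_options  # assumed import path

def configs() -> dict:
    # combine step-specific options with the shared api_token / atlos_url options
    return dict(
        {"example_option": {"default": 5, "help": "illustrative option, not in the diff"}},
        **get_atlos_config_options(),
    )

print(sorted(configs().keys()))  # ['api_token', 'atlos_url', 'example_option']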
@ -1,21 +1,24 @@
from __future__ import annotations
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.proxy import Proxy, ProxyType
from loguru import logger
from selenium.webdriver.common.by import By
import time


class Webdriver:
    def __init__(self, width: int, height: int, timeout_seconds: int, facebook_accept_cookies: bool = False) -> webdriver:
    def __init__(self, width: int, height: int, timeout_seconds: int, facebook_accept_cookies: bool = False, http_proxy: str = "") -> webdriver:
        self.width = width
        self.height = height
        self.timeout_seconds = timeout_seconds
        self.facebook_accept_cookies = facebook_accept_cookies
        self.http_proxy = http_proxy

    def __enter__(self) -> webdriver:
        options = webdriver.FirefoxOptions()
        options.add_argument("--headless")
        options.add_argument(f'--proxy-server={self.http_proxy}')
        options.set_preference('network.protocol-handler.external.tg', False)
        try:
            self.driver = webdriver.Firefox(options=options)
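The new http_proxy parameter is simply forwarded to the browser via --proxy-server. An illustrative use, assuming Firefox and geckodriver are installed, that __enter__ hands back the underlying Firefox driver as its type hint suggests, and with made-up dimensions and proxy address:

from auto_archiver.utils import Webdriver  # assumed import path

# all values below are illustrative only
with Webdriver(width=1400, height=2000, timeout_seconds=120,
               http_proxy="http://localhost:8080") as driver:
    driver.get("https://example.com")
    driver.save_screenshot("example.png")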
@ -1,9 +1,9 @@

_MAJOR = "0"
_MINOR = "7"
_MINOR = "11"
# On main and in a nightly release the patch should be one ahead of the last
# released build.
_PATCH = "3"
_PATCH = "2"
# This is mainly for nightly builds which have the suffix ".dev$DATE". See
# https://semver.org/#is-v123-a-semantic-version for the semantics.
_SUFFIX = ""
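The hunk only touches the component fields; for reference, a hedged sketch of how such fields are usually combined into the published semver string (the exact expression in version.py is not shown in this diff):

# assumption: fields are joined as MAJOR.MINOR.PATCH plus an optional suffix such as ".dev20240101"
_MAJOR, _MINOR, _PATCH, _SUFFIX = "0", "11", "2", ""
VERSION = f"{_MAJOR}.{_MINOR}.{_PATCH}{_SUFFIX}"
print(VERSION)  # 0.11.2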