Merge branch 'main' into feat/yt-dlp-pots

# Conflicts:
#	src/auto_archiver/modules/generic_extractor/__manifest__.py
#	tests/test_modules.py
pull/222/head
erinhmclark 2025-03-25 15:16:31 +00:00
commit b4c33318c4
28 changed files with 586 additions and 121 deletions

.github/dependabot.yml

@@ -0,0 +1,40 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
  - package-ecosystem: "pip"
    directory: "/"
    groups:
      python:
        patterns:
          - "*"
    schedule:
      interval: "weekly"
  - package-ecosystem: "github-actions"
    directory: "/"
    groups:
      actions:
        patterns:
          - "*"
    schedule:
      interval: "weekly"
  - package-ecosystem: "npm"
    directory: "/scripts/settings/"
    groups:
      actions:
        patterns:
          - "*"
    schedule:
      interval: "weekly"
  - package-ecosystem: "docker"
    # Look for a `Dockerfile` in the `root` directory
    directory: "/"
    # Check for updates once a week
    schedule:
      interval: "weekly"


@@ -3,8 +3,18 @@ name: Ruff Formatting & Linting
on:
  push:
    branches: [ main ]
    paths-ignore:
      - "README.md"
      - ".github"
      - "poetry.lock"
      - "scripts/settings"
  pull_request:
    branches: [ main ]
    paths-ignore:
      - "README.md"
      - ".github"
      - "poetry.lock"
      - "scripts/settings"
jobs:
  build:

.gitignore

@@ -4,6 +4,7 @@ temp/
.DS_Store
expmt/
service_account.json
service_account-*.json
__pycache__/
._*
anu.html


@@ -1,4 +1,3 @@
version: '3.8'
services:
  auto-archiver:
@@ -10,7 +9,4 @@ services:
    volumes:
      - ./secrets:/app/secrets
      - ./local_archive:/app/local_archive
    environment:
      - WACZ_ENABLE_DOCKER=true
      - RUNNING_IN_DOCKER=true
    command: --config secrets/orchestration.yaml


@@ -6,12 +6,43 @@ This guide explains how to set up Google Sheets to process URLs automatically an
2. Setting up a service account so Auto Archiver can access the sheet
3. Setting the Auto Archiver settings
### 1. Setting up your Google Sheet
Any Google sheet must have at least *one* column, with the name 'link' (you can change this name afterwards). This is the column with the URLs that you want the Auto Archiver to archive.
Your sheet can have many other columns that the Auto Archiver can use, and you can also include any additional columns for your own personal use. The order of the columns does not matter, the naming just needs to be correctly assigned to its corresponding value in the configuration file.
## 1. Setting up a Google Service Account
We recommend copying [this template Google Sheet](https://docs.google.com/spreadsheets/d/1NJZo_XZUBKTI1Ghlgi4nTPVvCfb0HXAs6j5tNGas72k/edit?usp=sharing) as a starting point for your project, as this matches the default column names.
Once your Google Sheet is set up, you need to create what's called a 'service account' that will allow the Auto Archiver to access it.
To do this, you can either:
* a) follow the steps in [this guide](https://gspread.readthedocs.io/en/latest/oauth2.html) all the way up until step 8. You should have downloaded a file called `service_account.json` and should save it in the `secrets/` folder
* b) run the following script to automatically generate the file:
```{code} bash
curl -fsSL https://raw.githubusercontent.com/bellingcat/auto-archiver/refs/heads/main/scripts/generate_google_services.sh | bash -s --
```
This uses `gcloud` to create a new project and user, and downloads the service account file automatically for you. The file will be named `service_account-XXXXXXX.json`, where XXXXXXX is a random 16-character letter/digit string for the created project.
````{note}
To save the generated file to a different folder, pass an argument as follows:
```{code} bash
curl -fsSL https://raw.githubusercontent.com/bellingcat/auto-archiver/refs/heads/main/scripts/generate_google_services.sh | bash -s -- /path/to/secrets
```
````
----------
Once you've downloaded the file, you can save it to `secrets/service_account.json` (the default name), or to another file and then change the location in the settings (see step 4).
Also make sure to **note down** the email address for this service account. You'll need that for step 3.
```{note}
The email address created in this step can be found either by opening the `service_account.json` file, or if you used b) the `generate_google_services.sh` script, then the script will have printed it out for you.
The email address will look something like `user@project-name.iam.gserviceaccount.com`
```
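One quick way to pull that address out of the key file (assuming the default `secrets/service_account.json` location) is:
```{code} bash
grep client_email secrets/service_account.json
```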
## 2. Setting up your Google Sheet
We recommend copying [this template Google Sheet](https://docs.google.com/spreadsheets/d/1NJZo_XZUBKTI1Ghlgi4nTPVvCfb0HXAs6j5tNGas72k/edit?usp=sharing) as a starting point for your project, as this matches all the columns required.
But if you like, you can also create your own custom sheet. The only columns required are 'link', 'archive status', and 'archive location'. 'link' is the column with the URLs that you want the Auto Archiver to archive, the other two record the archival status and result.
Here's an overview of all the columns, and what a complete sheet would look like.
@@ -46,21 +77,18 @@ In this example the Gsheet Feeder and Gsheet DB are being used, and the archive
![A screenshot of a Google Spreadsheet with column headers defined as above, and several Youtube and Twitter URLs in the "Link" column](../../demo-before.png)
We'll change the name of the 'Destination Folder' column in step 3.
We'll change the name of the 'Destination Folder' column in Step 4a.
## 2. Setting up your Service Account
## 3. Share your Google Sheet with your Service Account email address
Once your Google Sheet is set up, you need to create what's called a 'service account' that will allow the Auto Archiver to access it.
Remember that email address you copied in Step 1? Now that you've set up your Google sheet, click 'Share' in the top
right hand corner and enter the email address. Make sure to give the account **Editor** access. Here's how that looks:
To do this, follow the steps in [this guide](https://gspread.readthedocs.io/en/latest/oauth2.html) all the way up until step 8. You should have downloaded a file called `service_account.json` and shared the Google Sheet with the 'client_email' email address in this file.
![Share sheet](share_sheet.png)
Once you've downloaded the file, save it to `secrets/service_account.json`
## 4. Setting up the configuration file
## 3. Setting up the configuration file
Now that you've set up your Google sheet, and you've set up the service account so Auto Archiver can access the sheet, the final step is to set your configuration.
First, make sure you have `gsheet_feeder_db` set in the `steps.feeders` section of your config. If you wish to store the results of the archiving process back in your Google sheet, make sure to also set the `gsheet_db` setting in the `steps.databases` section. Here's how this might look:
The final step is to set your configuration. First, make sure you have `gsheet_feeder_db` set in the `steps.feeders` section of your config. If you wish to store the results of the archiving process back in your Google sheet, make sure to also add `gsheet_feeder_db` to the `steps.databases` section. Here's how this might look:
```{code} yaml
steps:
@@ -75,12 +103,15 @@ steps:
Next, set up the `gsheet_feeder_db` configuration settings in the 'Configurations' part of your `orchestration.yaml` config file. Open up the file and set either the `gsheet_feeder_db.sheet` setting or the `gsheet_feeder_db.sheet_id` setting. The `sheet` should be the name of your sheet, as it shows in the top left of the sheet.
For example, the sheet [here](https://docs.google.com/spreadsheets/d/1NJZo_XZUBKTI1Ghlgi4nTPVvCfb0HXAs6j5tNGas72k/edit?gid=0#gid=0) is called 'Public Auto Archiver template'.
If you saved your `service_account.json` file to anywhere other than the default location (`secrets/service_account.json`), then also make sure to change that now:
Here's how this might look:
```{code} yaml
...
gsheet_feeder_db:
  sheet: 'My Awesome Sheet'
  service_account: secrets/service_account-XXXXX.json # or leave as secrets/service_account.json
...
```
@@ -90,7 +121,7 @@ You can also pass these settings directly on the command line without having to
Here, the sheet name has been overridden/specified in the command line invocation.
### 3a. (Optional) Changing the column names
### 4a. (Optional) Changing the column names
In step 1, we said we would change the name of the 'Destination Folder'. Perhaps you don't like this name, or already have a sheet with a different name. In our example here, we want to name this column 'Save Folder'. To do this, we need to edit the `gsheet_feeder_db.columns` setting in the configuration file.
For more information on this setting, see the [Gsheet Feeder Database docs](../modules/autogen/feeder/gsheet_feeder_db.md#configuration-options). We will first copy the default settings from the Gsheet Feeder docs for the 'column' settings, and then edit the 'Destination Folder' section to rename it 'Save Folder'. Our final configuration section looks like:
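As a sketch of the result, the renamed section could look like the following. (The key names under `columns` are taken from the module's documented defaults; double-check them against the Gsheet Feeder Database docs for your version.)
```{code} yaml
...
gsheet_feeder_db:
  columns:
    url: 'link'                  # the column holding the URLs to archive
    status: 'archive status'
    folder: 'save folder'        # renamed from the default 'destination folder'
    archive: 'archive location'
...
```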

Binary file not shown (image, 60 KiB).


@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[project]
name = "auto-archiver"
version = "0.13.7"
version = "0.13.8"
description = "Automatically archive links to videos, images, and social media content from Google Sheets (and more)."
requires-python = ">=3.10,<3.13"


@@ -0,0 +1,135 @@
#!/usr/bin/env bash
set -e # Exit on error
UUID=$(LC_ALL=C tr -dc a-z0-9 </dev/urandom | head -c 16)
PROJECT_NAME="auto-archiver-$UUID"
ACCOUNT_NAME="autoarchiver"
KEY_FILE="service_account-$UUID.json"
DEST_DIR="$1"
echo "====================================================="
echo "🔧 Auto-Archiver Google Services Setup Script"
echo "====================================================="
echo "This script will:"
echo " 1. Install Google Cloud SDK if needed"
echo " 2. Create a Google Cloud project named $PROJECT_NAME"
echo " 3. Create a service account for Auto-Archiver"
echo " 4. Generate a key file for API access"
echo ""
echo " Tip: Pass a directory path as an argument to this script to move the key file there"
echo " e.g. ./generate_google_services.sh /path/to/secrets"
echo "====================================================="
# Check and install Google Cloud SDK based on platform
install_gcloud_sdk() {
    if command -v gcloud &> /dev/null; then
        echo "✅ Google Cloud SDK is already installed"
        return 0
    fi

    echo "📦 Installing Google Cloud SDK..."
    # Detect OS
    case "$(uname -s)" in
        Darwin*)
            if command -v brew &> /dev/null; then
                echo "🍺 Installing via Homebrew..."
                brew install google-cloud-sdk --cask
            else
                echo "📥 Downloading Google Cloud SDK for macOS..."
                curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-latest-darwin-x86_64.tar.gz
                tar -xf google-cloud-cli-latest-darwin-x86_64.tar.gz
                ./google-cloud-sdk/install.sh --quiet
                rm google-cloud-cli-latest-darwin-x86_64.tar.gz
                echo "🔄 Please restart your terminal and run this script again"
                exit 0
            fi
            ;;
        Linux*)
            echo "📥 Downloading Google Cloud SDK for Linux..."
            curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-latest-linux-x86_64.tar.gz
            tar -xf google-cloud-cli-latest-linux-x86_64.tar.gz
            ./google-cloud-sdk/install.sh --quiet
            rm google-cloud-cli-latest-linux-x86_64.tar.gz
            echo "🔄 Please restart your terminal and run this script again"
            exit 0
            ;;
        CYGWIN*|MINGW*|MSYS*)
            echo "⚠️ Windows detected. Please follow manual installation instructions at:"
            echo "https://cloud.google.com/sdk/docs/install-sdk"
            exit 1
            ;;
        *)
            echo "⚠️ Unknown operating system. Please follow manual installation instructions at:"
            echo "https://cloud.google.com/sdk/docs/install-sdk"
            exit 1
            ;;
    esac

    echo "✅ Google Cloud SDK installed"
}
# Install Google Cloud SDK if needed
install_gcloud_sdk
# Login to Google Cloud
if gcloud auth list --filter=status:ACTIVE --format="value(account)" | grep -q "@"; then
    echo "✅ Already authenticated with Google Cloud"
else
    echo "🔑 Authenticating with Google Cloud..."
    gcloud auth login
fi
# Create project
echo "🌟 Creating Google Cloud project: $PROJECT_NAME"
gcloud projects create $PROJECT_NAME
# Create service account
echo "👤 Creating service account: $ACCOUNT_NAME"
gcloud iam service-accounts create $ACCOUNT_NAME --project $PROJECT_NAME
# Enable required APIs (uncomment and add APIs as needed)
echo "⬆️ Enabling required Google APIs..."
gcloud services enable sheets.googleapis.com --project $PROJECT_NAME
gcloud services enable drive.googleapis.com --project $PROJECT_NAME
# Get the service account email
echo "📧 Retrieving service account email..."
ACCOUNT_EMAIL=$(gcloud iam service-accounts list --project $PROJECT_NAME --format="value(email)")
# Create and download key
echo "🔑 Generating service account key file: $KEY_FILE"
gcloud iam service-accounts keys create $KEY_FILE --iam-account=$ACCOUNT_EMAIL
# move the file to DEST_DIR if provided
if [[ -n "$DEST_DIR" ]]; then
    # Expand `~` if used
    DEST_DIR=$(eval echo "$DEST_DIR")
    # Ensure the directory exists
    if [[ ! -d "$DEST_DIR" ]]; then
        mkdir -p "$DEST_DIR"
    fi

    DEST_PATH="$DEST_DIR/$KEY_FILE"
    echo "🚚 Moving key file to: $DEST_PATH"
    mv "$KEY_FILE" "$DEST_PATH"
    KEY_FILE="$DEST_PATH"
fi
echo "====================================================="
echo "✅ SETUP COMPLETE!"
echo "====================================================="
echo "📝 Important Information:"
echo " • Project Name: $PROJECT_NAME"
echo " • Service Account: $ACCOUNT_EMAIL"
echo " • Key File: $KEY_FILE"
echo ""
echo "📋 Next Steps:"
echo " 1. Share any Google Sheets with this email address:"
echo " $ACCOUNT_EMAIL"
echo " 2. Move $KEY_FILE to your auto-archiver secrets directory"
echo " 3. Update your auto-archiver config to use this key file (if needed)"
echo "====================================================="


@@ -71,7 +71,16 @@ class BaseModule(ABC):
:param site: the domain of the site to get authentication information for
:param extract_cookies: whether or not to extract cookies from the given browser/file and return the cookie jar (disabling can speed up processing if you don't actually need the cookies jar).
:returns: authdict dict of login information for the given site
:returns: authdict dict -> {
    "username": str,
    "password": str,
    "api_key": str,
    "api_secret": str,
    "cookie": str,
    "cookies_file": str,
    "cookies_from_browser": str,
    "cookies_jar": CookieJar
}
**Global options:**\n
* cookies_from_browser: str - the name of the browser to extract cookies from (e.g. 'chrome', 'firefox'); uses yt-dlp under the hood to extract cookies\n
@@ -85,6 +94,7 @@ class BaseModule(ABC):
* cookie: str - a cookie string to use for login (specific to this site)\n
* cookies_file: str - the path to a cookies file to use for login (specific to this site)\n
* cookies_from_browser: str - the name of the browser to extract cookies from (specific to this site)\n
"""
# TODO: think about if/how we can deal with sites that have multiple domains (main one is x.com/twitter.com)
# for now the user must enter them both, like "x.com,twitter.com" in their config. Maybe we just hard-code?
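A hypothetical usage sketch (illustrative only, not code from this PR) of how a module might consume that authdict, preferring the cookie-type entries:

```python
import requests

from auto_archiver.core.base_module import BaseModule


class ExampleAuthModule(BaseModule):
    """Hypothetical module showing one way the authdict could be consumed."""

    def fetch(self, url: str) -> requests.Response:
        auth = self.auth_for_site(url, extract_cookies=True)
        session = requests.Session()
        if jar := auth.get("cookies_jar"):
            # CookieJar assembled from cookies_file / cookies_from_browser
            session.cookies.update(jar)
        elif cookie := auth.get("cookie"):
            # raw cookie string configured for this specific site
            session.headers["Cookie"] = cookie
        return session.get(url)
```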


@@ -5,6 +5,7 @@ by handling user configuration, validating the steps properties, and implementin
"""
from __future__ import annotations
import subprocess
from dataclasses import dataclass
from typing import List, TYPE_CHECKING, Type
@@ -17,7 +18,7 @@ import os
from os.path import join
from loguru import logger
import auto_archiver
from auto_archiver.core.consts import DEFAULT_MANIFEST, MANIFEST_FILE
from auto_archiver.core.consts import DEFAULT_MANIFEST, MANIFEST_FILE, SetupError
if TYPE_CHECKING:
from .base_module import BaseModule
@@ -85,7 +86,11 @@ class ModuleFactory:
        if not available:
            message = f"Module '{module_name}' not found. Are you sure it's installed/exists?"
            if "archiver" in module_name:
                message += f" Did you mean {module_name.replace('archiver', 'extractor')}?"
                message += f" Did you mean '{module_name.replace('archiver', 'extractor')}'?"
            elif "gsheet" in module_name:
                message += " Did you mean 'gsheet_feeder_db'?"
            elif "atlos" in module_name:
                message += " Did you mean 'atlos_feeder_db_storage'?"
            raise IndexError(message)
        return available[0]
@@ -216,9 +221,9 @@ class LazyBaseModule:
            if not check(dep):
                logger.error(
                    f"Module '{self.name}' requires external dependency '{dep}' which is not available/setup. \
                    Have you installed the required dependencies for the '{self.name}' module? See the README for more information."
                    Have you installed the required dependencies for the '{self.name}' module? See the documentation for more information."
                )
                exit(1)
                raise SetupError()

        def check_python_dep(dep):
            # first check if it's a module:
@@ -237,8 +242,22 @@ class LazyBaseModule:
            return find_spec(dep)

        def check_bin_dep(dep):
            dep_exists = shutil.which(dep)

            if dep == "docker":
                if os.environ.get("RUNNING_IN_DOCKER"):
                    # this is only for the WACZ enricher, which requires docker
                    # if we're already running in docker then we don't need docker
                    return True

                # check if docker daemon is running
                return dep_exists and subprocess.run(["docker", "ps", "-q"]).returncode == 0
            return dep_exists

        check_deps(self.dependencies.get("python", []), check_python_dep)
        check_deps(self.dependencies.get("bin", []), lambda dep: shutil.which(dep))
        check_deps(self.dependencies.get("bin", []), check_bin_dep)

        logger.debug(f"Loading module '{self.display_name}'...")
logger.debug(f"Loading module '{self.display_name}'...")


@@ -373,9 +373,17 @@ Here's how that would look: \n\nsteps:\n  extractors:\n  - [your_extractor_name_
            if module in invalid_modules:
                continue

            # check to make sure that we're trying to load it as the correct type - i.e. make sure the user hasn't put it under the wrong 'step'
            lazy_module: LazyBaseModule = self.module_factory.get_module_lazy(module)
            if module_type not in lazy_module.type:
                types = ",".join(f"'{t}'" for t in lazy_module.type)
                raise SetupError(
                    f"Configuration Error: Module '{module}' is not a {module_type}, but has the types: {types}. Please check you set this module up under the right step in your orchestration file."
                )

            loaded_module = None
            try:
                loaded_module: BaseModule = self.module_factory.get_module(module, self.config)
                loaded_module: BaseModule = lazy_module.load(self.config)
            except (KeyboardInterrupt, Exception) as e:
                if not isinstance(e, KeyboardInterrupt) and not isinstance(e, SetupError):
                    logger.error(f"Error during setup of modules: {e}\n{traceback.format_exc()}")


@@ -74,10 +74,6 @@ If you are having issues with the extractor, you can review the version of `yt-d
        "default": "inf",
        "help": "Use to limit the number of videos to download when a channel or long page is being extracted. 'inf' means no limit.",
    },
    "pot_provider": {
        "default": "bgutils",
        "help": "The Proof of origin provider method.",
    },
    "extractor_args": {
        "default": {},
        "help": "Additional arguments to pass to the yt-dlp extractor. See https://github.com/yt-dlp/yt-dlp/blob/master/README.md#extractor-arguments.",


@@ -1,6 +1,5 @@
import re
import mimetypes
import json
from loguru import logger
from slugify import slugify
@@ -32,6 +31,9 @@ class Twitter(GenericDropin):
        twid = ie_instance._match_valid_url(url).group("id")
        return ie_instance._extract_status(twid=twid)

    def keys_to_clean(self, video_data, info_extractor):
        return ["user", "created_at", "entities", "favorited", "translator_type"]

    def create_metadata(self, tweet: dict, ie_instance: InfoExtractor, archiver: Extractor, url: str) -> Metadata:
        result = Metadata()
        try:
@@ -42,9 +44,11 @@ class Twitter(GenericDropin):
            logger.warning(f"Unable to parse tweet: {str(ex)}\nRetrieved tweet data: {tweet}")
            return False

        result.set_title(tweet.get("full_text", "")).set_content(json.dumps(tweet, ensure_ascii=False)).set_timestamp(
            timestamp
        )
        full_text = tweet.pop("full_text", "")
        author = tweet["user"].get("name", "")
        result.set("author", author).set_url(url)
        result.set_title(f"{author} - {full_text}").set_content(full_text).set_timestamp(timestamp)

        if not tweet.get("entities", {}).get("media"):
            logger.debug("No media found, archiving tweet text only")
            result.status = "twitter-ytdl"


@@ -70,10 +70,14 @@
- Skips redundant updates for empty or invalid data fields.
### Setup
- Requires a Google Service Account JSON file for authentication, which should be stored in `secrets/gsheets_service_account.json`.
To set up a service account, follow the instructions [here](https://gspread.readthedocs.io/en/latest/oauth2.html).
- Define the `sheet` or `sheet_id` configuration to specify the sheet to archive.
- Customize the column names in your Google sheet using the `columns` configuration.
- The Google Sheet can be used solely as a feeder or as a feeder and database, but note you can't currently feed into the database from an alternate feeder.
1. Requires a Google Service Account JSON file for authentication.
To set up a service account, follow the instructions in the [how to](https://auto-archiver.readthedocs.io/en/latest/how_to/gsheets_setup.html),
or use the script:
```
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/bellingcat/auto-archiver/refs/heads/main/scripts/generate_google_services.sh)"
```
2. Create a Google sheet with the required column(s) and then define the `sheet` or `sheet_id` configuration to specify this sheet.
3. Customize the column names in your Google sheet using the `columns` configuration.
4. The Google Sheet can be used solely as a feeder or as a feeder and database, but note you can't currently feed into the database from an alternate feeder.
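For example, a minimal `steps` layout using the module as both feeder and database (a sketch consistent with the how-to guide linked above) might be:
```yaml
steps:
  feeders:
    - gsheet_feeder_db
  databases:
    - gsheet_feeder_db
```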
""",
}


@@ -29,6 +29,9 @@ class InstagramExtractor(Extractor):
    # TODO: links to stories

    def setup(self) -> None:
        logger.warning("Instagram Extractor is not actively maintained, and may not work as expected.")
        logger.warning("Please consider using the Instagram Tbot Extractor or Instagram API Extractor instead.")

        self.insta = instaloader.Instaloader(
            download_geotags=True,
            download_comments=True,


@@ -19,12 +19,21 @@ class ScreenshotEnricher(Enricher):
    def enrich(self, to_enrich: Metadata) -> None:
        url = to_enrich.get_url()

        if UrlUtil.is_auth_wall(url):
            logger.debug(f"[SKIP] SCREENSHOT since url is behind AUTH WALL: {url=}")
            return

        logger.debug(f"Enriching screenshot for {url=}")
        auth = self.auth_for_site(url)

        # screenshot enricher only supports cookie-type auth (selenium)
        has_valid_auth = auth and (auth.get("cookies") or auth.get("cookies_jar") or auth.get("cookie"))
        if UrlUtil.is_auth_wall(url) and not has_valid_auth:
            logger.warning(f"[SKIP] SCREENSHOT since url is behind AUTH WALL and no login details provided: {url=}")
            if any(auth.get(key) for key in ["username", "password", "api_key", "api_secret"]):
                logger.warning(
                    f"Screenshot enricher only supports cookie-type authentication, you have provided {auth.keys()} which are not supported.\
                    Consider adding 'cookie', 'cookies_file' or 'cookies_from_browser' to your auth for this site."
                )
            return

        with self.webdriver_factory(
            self.width,
            self.height,


@@ -11,7 +11,7 @@
    "configs": {
        "profile": {
            "default": None,
            "help": "browsertrix-profile (for profile generation see https://github.com/webrecorder/browsertrix-crawler#creating-and-using-browser-profiles).",
            "help": "browsertrix-profile (for profile generation see https://crawler.docs.browsertrix.com/user-guide/browser-profiles/).",
        },
        "docker_commands": {"default": None, "help": "if a custom docker invocation is needed"},
        "timeout": {"default": 120, "help": "timeout for WACZ generation in seconds", "type": "int"},
@@ -40,14 +40,27 @@
Creates .WACZ archives of web pages using the `browsertrix-crawler` tool, with options for media extraction and screenshot saving.
[Browsertrix-crawler](https://crawler.docs.browsertrix.com/user-guide/) is a headless browser-based crawler that archives web pages in WACZ format.
### Features
## Setup
**Docker**
If you are using the Docker file to run Auto Archiver (recommended), then everything is set up and you can use WACZ out of the box!
Otherwise, if you are using a local install of Auto Archiver (e.g. pip or dev install), then you will need to install Docker and run
the docker daemon to be able to run the `browsertrix-crawler` tool.
**Browsertrix Profiles**
A browsertrix profile is a custom browser profile (login information, browser extensions, etc.) that can be used to archive private or dynamic content.
You can run the WACZ Enricher without a profile, but for more resilient archiving, it is recommended to create a profile. See the [Browsertrix documentation](https://crawler.docs.browsertrix.com/user-guide/browser-profiles/)
for more information.
**Docker in Docker**
If you are running Auto Archiver within a Docker container, you will need to enable Docker in Docker to run the `browsertrix-crawler` tool.
This can be done by setting the `WACZ_ENABLE_DOCKER` environment variable to `1`.
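For example, in a `docker-compose.yaml` this could be set as follows (a sketch; adjust to your own compose setup):
```yaml
services:
  auto-archiver:
    environment:
      - WACZ_ENABLE_DOCKER=1
```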
## Features
- Archives web pages into .WACZ format using Docker or direct invocation of `browsertrix-crawler`.
- Supports custom profiles for archiving private or dynamic content.
- Extracts media (images, videos, audio) and screenshots from the archive, optionally adding them to the enrichment pipeline.
- Generates metadata from the archived page's content and structure (e.g., titles, text).
### Notes
- Requires Docker for running `browsertrix-crawler`.
- Configurable via parameters for timeout, media extraction, screenshots, and proxy settings.
""",
}


@@ -24,7 +24,8 @@ class WaczExtractorEnricher(Enricher, Extractor):
        self.use_docker = os.environ.get("WACZ_ENABLE_DOCKER") or not os.environ.get("RUNNING_IN_DOCKER")
        self.docker_in_docker = os.environ.get("WACZ_ENABLE_DOCKER") and os.environ.get("RUNNING_IN_DOCKER")

        self.cwd_dind = f"/crawls/crawls{random_str(8)}"
        self.crawl_id = random_str(8)
        self.cwd_dind = f"/crawls/crawls{self.crawl_id}"
        self.browsertrix_home_host = os.environ.get("BROWSERTRIX_HOME_HOST")
        self.browsertrix_home_container = os.environ.get("BROWSERTRIX_HOME_CONTAINER") or self.browsertrix_home_host

        # create crawls folder if not exists, so it can be safely removed in cleanup
@@ -50,7 +51,7 @@ class WaczExtractorEnricher(Enricher, Extractor):
        url = to_enrich.get_url()

        collection = random_str(8)
        collection = self.crawl_id
        browsertrix_home_host = self.browsertrix_home_host or os.path.abspath(self.tmp_dir)
        browsertrix_home_container = self.browsertrix_home_container or browsertrix_home_host
@@ -102,10 +103,11 @@ class WaczExtractorEnricher(Enricher, Extractor):
        ] + cmd

        if self.profile:
            profile_fn = os.path.join(browsertrix_home_container, "profile.tar.gz")
            profile_file = f"profile-{self.crawl_id}.tar.gz"
            profile_fn = os.path.join(browsertrix_home_container, profile_file)
            logger.debug(f"copying {self.profile} to {profile_fn}")
            shutil.copyfile(self.profile, profile_fn)
            cmd.extend(["--profile", os.path.join("/crawls", "profile.tar.gz")])
            cmd.extend(["--profile", os.path.join("/crawls", profile_file)])
        else:
            logger.debug(f"generating WACZ without Docker for {url=}")


@@ -4,8 +4,8 @@ from ipaddress import ip_address

AUTHWALL_URLS = [
    re.compile(r"https:\/\/t\.me(\/c)\/(.+)\/(\d+)"),  # telegram private channels
    re.compile(r"https:\/\/www\.instagram\.com"),  # instagram
    re.compile(r"https?:\/\/t\.me(\/c)\/(.+)\/(\d+)"),  # telegram private channels
    re.compile(r"https?:\/\/(www\.)?instagram\.com"),  # instagram
]
@@ -81,56 +81,43 @@ def is_relevant_url(url: str) -> bool:
    """
    clean_url = remove_get_parameters(url)

    # favicons
    if "favicon" in url:
        return False
    # ignore icons
    if clean_url.endswith(".ico"):
        return False
    # ignore SVGs
    if remove_get_parameters(url).endswith(".svg"):
        return False

    IRRELEVANT_URLS = [
        # favicons
        ("favicon",),
        # twitter profile pictures
        ("twimg.com/profile_images",),
        ("twimg.com", "default_profile_images"),
        # instagram profile pictures
        ("https://scontent.cdninstagram.com/", "150x150"),
        # instagram recurring images
        ("https://static.cdninstagram.com/rsrc.php/",),
        # telegram
        ("https://telegram.org/img/emoji/",),
        # youtube
        ("https://www.youtube.com/s/gaming/emoji/",),
        ("https://yt3.ggpht.com", "default-user="),
        ("https://www.youtube.com/s/search/audio/",),
        # ok
        ("https://ok.ru/res/i/",),
        ("https://vk.com/emoji/",),
        ("vk.com/images/",),
        ("vk.com/images/reaction/",),
        # wikipedia
        ("wikipedia.org/static",),
    ]

    # twitter profile pictures
    if "twimg.com/profile_images" in url:
        return False
    if "twimg.com" in url and "/default_profile_images" in url:
        return False

    IRRELEVANT_ENDS_WITH = [
        ".svg",  # ignore SVGs
        ".ico",  # ignore icons
    ]

    # instagram profile pictures
    if "https://scontent.cdninstagram.com/" in url and "150x150" in url:
        return False
    # instagram recurring images
    if "https://static.cdninstagram.com/rsrc.php/" in url:
        return False

    for end in IRRELEVANT_ENDS_WITH:
        if clean_url.endswith(end):
            return False

    # telegram
    if "https://telegram.org/img/emoji/" in url:
        return False
    # youtube
    if "https://www.youtube.com/s/gaming/emoji/" in url:
        return False
    if "https://yt3.ggpht.com" in url and "default-user=" in url:
        return False
    if "https://www.youtube.com/s/search/audio/" in url:
        return False
    # ok
    if " https://ok.ru/res/i/" in url:
        return False
    # vk
    if "https://vk.com/emoji/" in url:
        return False
    if "vk.com/images/" in url:
        return False
    if "vk.com/images/reaction/" in url:
        return False
    # wikipedia
    if "wikipedia.org/static" in url:
        return False

    for parts in IRRELEVANT_URLS:
        if all(part in clean_url for part in parts):
            return False

    return True


@@ -22,35 +22,35 @@ from loguru import logger

class CookieSettingDriver(webdriver.Firefox):
    facebook_accept_cookies: bool
    cookies: str
    cookiejar: MozillaCookieJar
    cookie: str
    cookie_jar: MozillaCookieJar

    def __init__(self, cookies, cookiejar, facebook_accept_cookies, *args, **kwargs):
    def __init__(self, cookie, cookie_jar, facebook_accept_cookies, *args, **kwargs):
        if os.environ.get("RUNNING_IN_DOCKER"):
            # Selenium doesn't support linux-aarch64 driver, we need to set this manually
            kwargs["service"] = webdriver.FirefoxService(executable_path="/usr/local/bin/geckodriver")

        super(CookieSettingDriver, self).__init__(*args, **kwargs)
        self.cookies = cookies
        self.cookiejar = cookiejar
        self.cookie = cookie
        self.cookie_jar = cookie_jar
        self.facebook_accept_cookies = facebook_accept_cookies

    def get(self, url: str):
        if self.cookies or self.cookiejar:
        if self.cookie_jar or self.cookie:
            # set up the driver to make it not 'cookie averse' (needs a context/URL)
            # get the 'robots.txt' file which should be quick and easy
            robots_url = urlunparse(urlparse(url)._replace(path="/robots.txt", query="", fragment=""))
            super(CookieSettingDriver, self).get(robots_url)

            if self.cookies:
            if self.cookie:
                # an explicit cookie is set for this site, use that first
                for cookie in self.cookies.split(";"):
                    for name, value in cookie.split("="):
                        self.driver.add_cookie({"name": name, "value": value})
            elif self.cookiejar:
                domain = urlparse(url).netloc
            elif self.cookie_jar:
                domain = urlparse(url).netloc.removeprefix("www.")
                regex = re.compile(f"(www)?.?{domain}$")
                for cookie in self.cookiejar:
                for cookie in self.cookie_jar:
                    if regex.match(cookie.domain):
                        try:
                            self.add_cookie(
@@ -145,8 +145,8 @@ class Webdriver:
        try:
            self.driver = CookieSettingDriver(
                cookies=self.auth.get("cookies"),
                cookiejar=self.auth.get("cookies_jar"),
                cookie=self.auth.get("cookie"),
                cookie_jar=self.auth.get("cookies_jar"),
                facebook_accept_cookies=self.facebook_accept_cookies,
                options=options,
            )


@@ -0,0 +1,11 @@
{
    # Display Name of your module
    "name": "Example Extractor",
    # Optional version number, for your own versioning purposes
    "version": 2.0,
    # The type of the module, must be one (or more) of the built in module types
    "type": ["extractor"],
    # a boolean indicating whether or not a module requires additional user setup before it can be used
    # for example: adding API keys, installing additional software etc.
    "requires_setup": False,
}


@@ -0,0 +1,6 @@
from auto_archiver.core import Extractor


class ExampleExtractor(Extractor):
    def download(self, item):
        print("download")


@@ -85,8 +85,8 @@ def test_enrich_adds_screenshot(
    mock_driver, mock_driver_class, mock_options_instance = mock_selenium_env
    screenshot_enricher.enrich(metadata_with_video)
    mock_driver_class.assert_called_once_with(
        cookies=None,
        cookiejar=None,
        cookie=None,
        cookie_jar=None,
        facebook_accept_cookies=False,
        options=mock_options_instance,
    )
@@ -124,6 +124,38 @@
    assert metadata_with_video.media[1].properties.get("id") == "screenshot"


def test_skip_authwall_no_cookies(screenshot_enricher, caplog):
    with caplog.at_level("WARNING"):
        screenshot_enricher.enrich(Metadata().set_url("https://instagram.com"))
    assert "[SKIP] SCREENSHOT since url" in caplog.text


@pytest.mark.parametrize(
    "auth",
    [
        {"cookie": "cookie"},
        {"cookies_jar": "cookie"},
    ],
)
def test_dont_skip_authwall_with_cookies(screenshot_enricher, caplog, mocker, mock_selenium_env, auth):
    mocker.patch("auto_archiver.utils.url.is_auth_wall", return_value=True)

    # patch the authentication dict:
    screenshot_enricher.authentication = {"example.com": auth}
    with caplog.at_level("WARNING"):
        screenshot_enricher.enrich(Metadata().set_url("https://example.com"))
    assert "[SKIP] SCREENSHOT since url" not in caplog.text


def test_show_warning_wrong_auth_type(screenshot_enricher, caplog, mocker, mock_selenium_env):
    mock_driver, mock_driver_class, _ = mock_selenium_env
    mocker.patch("auto_archiver.utils.url.is_auth_wall", return_value=True)
    screenshot_enricher.authentication = {"example.com": {"username": "user", "password": "pass"}}
    with caplog.at_level("WARNING"):
        screenshot_enricher.enrich(Metadata().set_url("https://example.com"))
    assert "Screenshot enricher only supports cookie-type authentication" in caplog.text


def test_handle_timeout_exception(screenshot_enricher, metadata_with_video, mock_selenium_env, mocker):
    mock_driver, mock_driver_class, mock_options_instance = mock_selenium_env


@@ -4,6 +4,7 @@ from zipfile import ZipFile
import pytest

from auto_archiver.core import Metadata, Media
from auto_archiver.core.consts import SetupError


@pytest.fixture
@@ -22,6 +23,15 @@ def wacz_enricher(setup_module, mock_binary_dependencies):
    return wacz


def test_raises_error_without_docker_installed(setup_module, mocker, caplog):
    # pretend that docker isn't installed
    mocker.patch("shutil.which").return_value = None

    with pytest.raises(SetupError):
        setup_module("wacz_extractor_enricher", {})

    assert "requires external dependency 'docker' which is not available/setup" in caplog.text


def test_setup_without_docker(wacz_enricher, mocker):
    mocker.patch.dict(os.environ, {"RUNNING_IN_DOCKER": "1"}, clear=True)
    wacz_enricher.setup()


@@ -207,10 +207,11 @@ class TestGenericExtractor(TestExtractorBase):
        self.assertValidResponseMetadata(
            post,
            "Onion rings are just vegetable donuts.",
            "Cookie Monster - Onion rings are just vegetable donuts.",
            datetime.datetime(2023, 1, 24, 16, 25, 51, tzinfo=datetime.timezone.utc),
            "yt-dlp_Twitter: success",
        )
        assert post.get("content") == "Onion rings are just vegetable donuts."

    @pytest.mark.download
    def test_twitter_download_video(self, make_item):


@@ -1,6 +1,7 @@
import pytest
from auto_archiver.core.module import ModuleFactory, LazyBaseModule
from auto_archiver.core.base_module import BaseModule
from auto_archiver.core.consts import SetupError
@pytest.fixture
@@ -25,11 +26,9 @@ def test_python_dependency_check(example_module):
    # monkey patch the manifest to include a nonexistent dependency
    example_module.manifest["dependencies"]["python"] = ["does_not_exist"]

    with pytest.raises(SystemExit) as load_error:
    with pytest.raises(SetupError):
        example_module.load({})

    assert load_error.value.code == 1


def test_binary_dependency_check(example_module):
    # example_module requires ffmpeg, which is not installed
@@ -81,8 +80,20 @@ def test_load_modules(module_name):
    # check that default settings are applied
    default_config = module.configs
    assert loaded_module.name in loaded_module.config.keys()
    defaults = {k for k in default_config}
    assert defaults in [loaded_module.config[module_name].keys()]


@pytest.mark.parametrize("module_name", ["local_storage", "generic_extractor", "html_formatter", "csv_db"])
def test_config_defaults(module_name):
    # test the values of the default config values are set
    # Note: some modules can alter values in the setup() method, this test checks cases that don't
    module = ModuleFactory().get_module_lazy(module_name)
    loaded_module = module.load({})

    # check that default config values are set
    default_config = module.configs
    defaults = {k: v.get("default") for k, v in default_config.items()}
    assert defaults.keys() in [loaded_module.config[module_name].keys()]
    assert defaults == loaded_module.config[module_name]
@pytest.mark.parametrize("module_name", ["local_storage", "generic_extractor", "html_formatter", "csv_db"])


@@ -4,6 +4,7 @@ from auto_archiver.core.orchestrator import ArchivingOrchestrator
from auto_archiver.version import __version__
from auto_archiver.core.config import read_yaml, store_yaml
from auto_archiver.core import Metadata
from auto_archiver.core.consts import SetupError
TEST_ORCHESTRATION = "tests/data/test_orchestration.yaml"
TEST_MODULES = "tests/data/test_modules/"
@@ -224,3 +225,15 @@ def test_multiple_orchestrator(test_args):
    output: Metadata = list(o2.feed())

    assert len(output) == 1
    assert output[0].get_url() == "https://example.com"


def test_wrong_step_type(test_args, caplog):
    args = test_args + [
        "--feeders",
        "example_extractor",  # example_extractor is not a valid feeder!
    ]
    orchestrator = ArchivingOrchestrator()
    with pytest.raises(SetupError) as err:
        orchestrator.setup(args)
    assert "Module 'example_extractor' is not a feeder" in str(err.value)


@@ -0,0 +1,113 @@
import pytest

from auto_archiver.utils.url import (
    is_auth_wall,
    check_url_or_raise,
    domain_for_url,
    is_relevant_url,
    remove_get_parameters,
    twitter_best_quality_url,
)


@pytest.mark.parametrize(
    "url, is_auth",
    [
        ("https://example.com", False),
        ("https://t.me/c/abc/123", True),
        ("https://t.me/not-private/", False),
        ("https://instagram.com", True),
        ("https://www.instagram.com", True),
        ("https://www.instagram.com/p/INVALID", True),
        ("https://www.instagram.com/p/C4QgLbrIKXG/", True),
    ],
)
def test_is_auth_wall(url, is_auth):
    assert is_auth_wall(url) == is_auth


@pytest.mark.parametrize(
    "url, raises",
    [
        ("http://example.com", False),
        ("https://example.com", False),
        ("ftp://example.com", True),
        ("http://localhost", True),
        ("http://", True),
    ],
)
def test_check_url_or_raise(url, raises):
    if raises:
        with pytest.raises(ValueError):
            check_url_or_raise(url)
    else:
        assert check_url_or_raise(url)


@pytest.mark.parametrize(
    "url, domain",
    [
        ("https://example.com", "example.com"),
        ("https://www.example.com", "www.example.com"),
        ("https://www.example.com/path", "www.example.com"),
        ("https://", ""),
        ("http://localhost", "localhost"),
    ],
)
def test_domain_for_url(url, domain):
    assert domain_for_url(url) == domain


@pytest.mark.parametrize(
    "url, without_get",
    [
        ("https://example.com", "https://example.com"),
        ("https://example.com?utm_source=example", "https://example.com"),
        ("https://example.com?utm_source=example&other=1", "https://example.com"),
        ("https://example.com/something", "https://example.com/something"),
        ("https://example.com/something?utm_source=example", "https://example.com/something"),
    ],
)
def test_remove_get_parameters(url, without_get):
    assert remove_get_parameters(url) == without_get


@pytest.mark.parametrize(
    "url, relevant",
    [
        ("https://example.com", True),
        ("https://example.com/favicon.ico", False),
        ("https://twimg.com/profile_images", False),
        ("https://twimg.com/something/default_profile_images", False),
        ("https://scontent.cdninstagram.com/username/150x150.jpg", False),
        ("https://static.cdninstagram.com/rsrc.php/", False),
        ("https://telegram.org/img/emoji/", False),
        ("https://www.youtube.com/s/gaming/emoji/", False),
        ("https://yt3.ggpht.com/default-user=", False),
        ("https://www.youtube.com/s/search/audio/", False),
        ("https://ok.ru/res/i/", False),
        ("https://vk.com/emoji/", False),
        ("https://vk.com/images/", False),
        ("https://vk.com/images/reaction/", False),
        ("https://wikipedia.org/static", False),
        ("https://example.com/file.svg", False),
        ("https://example.com/file.ico", False),
        ("https://example.com/file.mp4", True),
        ("https://example.com/150x150.jpg", True),
        ("https://example.com/rsrc.php/", True),
        ("https://example.com/img/emoji/", True),
    ],
)
def test_is_relevant_url(url, relevant):
    assert is_relevant_url(url) == relevant


@pytest.mark.parametrize(
    "url, best_quality",
    [
        ("https://twitter.com/some_image.jpg?name=small", "https://twitter.com/some_image.jpg?name=orig"),
        ("https://twitter.com/some_image.jpg", "https://twitter.com/some_image.jpg"),
        ("https://twitter.com/some_image.jpg?name=orig", "https://twitter.com/some_image.jpg?name=orig"),
    ],
)
def test_twitter_best_quality_url(url, best_quality):
    assert twitter_best_quality_url(url) == best_quality